/*
 *  arch/s390/kernel/vtime.c
 *    Virtual cpu timer based timer functions.
 *
 *  Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include <linux/rcupdate.h>
#include <linux/posix-timers.h>

#include <asm/s390_ext.h>
#include <asm/timer.h>

static ext_int_info_t ext_int_info_timer;
DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_tick_vtime(struct task_struct *tsk)
{
	cputime_t cputime;
	__u64 timer, clock;
	int rcu_user_flag;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile ("  STPT %0\n"	/* Store current cpu timer value */
		      "  STCK %1"	/* Store current tod clock value */
		      : "=m" (S390_lowcore.last_update_timer),
		        "=m" (S390_lowcore.last_update_clock) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
	S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;

	cputime = S390_lowcore.user_timer >> 12;
	rcu_user_flag = cputime != 0;
	S390_lowcore.user_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_user_time(tsk, cputime);

	cputime = S390_lowcore.system_timer >> 12;
	S390_lowcore.system_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_system_time(tsk, HARDIRQ_OFFSET, cputime);

	cputime = S390_lowcore.steal_clock;
	if ((__s64) cputime > 0) {
		cputime >>= 12;
		S390_lowcore.steal_clock -= cputime << 12;
		account_steal_time(tsk, cputime);
	}

	if (rcu_pending(smp_processor_id()))
		rcu_check_callbacks(smp_processor_id(), rcu_user_flag);

	run_posix_cpu_timers(tsk);
}
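
/*
 * Unit sketch (illustration, assuming microsecond based cputime as the
 * ">> 12" above implies): the CPU timer and TOD clock count in units
 * of 2^-12 microseconds (bit 51 equals one microsecond), so
 *
 *	cputime = S390_lowcore.user_timer >> 12;    whole microseconds
 *	S390_lowcore.user_timer -= cputime << 12;   sub-microsecond rest
 *
 * keeps the remainder in the lowcore field; no time is lost between
 * ticks because the remainder is carried over.
 */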

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_vtime(struct task_struct *tsk)
{
	cputime_t cputime;
	__u64 timer;

	timer = S390_lowcore.last_update_timer;
	asm volatile ("  STPT %0"	/* Store current cpu timer value */
		      : "=m" (S390_lowcore.last_update_timer) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	cputime = S390_lowcore.user_timer >> 12;
	S390_lowcore.user_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_user_time(tsk, cputime);

	cputime = S390_lowcore.system_timer >> 12;
	S390_lowcore.system_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_system_time(tsk, 0, cputime);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_system_vtime(struct task_struct *tsk)
{
	cputime_t cputime;
	__u64 timer;

	timer = S390_lowcore.last_update_timer;
	asm volatile ("  STPT %0"	/* Store current cpu timer value */
		      : "=m" (S390_lowcore.last_update_timer) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	cputime = S390_lowcore.system_timer >> 12;
	S390_lowcore.system_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_system_time(tsk, 0, cputime);
}

static inline void set_vtimer(__u64 expires)
{
	__u64 timer;

	asm volatile ("  STPT %0\n"	/* Store current cpu timer value */
		      "  SPT %1"	/* Set new value immediately afterwards */
		      : "=m" (timer) : "m" (expires) );
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
	S390_lowcore.last_update_timer = expires;

	/* store expire time for this CPU timer */
	per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
}
#else
static inline void set_vtimer(__u64 expires)
{
	S390_lowcore.last_update_timer = expires;
	asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));

	/* store expire time for this CPU timer */
	per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
}
#endif

static void start_cpu_timer(void)
{
	struct vtimer_queue *vt_list;

	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());

	/* CPU timer interrupt is pending, don't reprogram it */
	if (vt_list->idle & 1LL << 63)
		return;

	if (!list_empty(&vt_list->list))
		set_vtimer(vt_list->idle);
}

static void stop_cpu_timer(void)
{
	struct vtimer_queue *vt_list;

	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());

	/* nothing to do */
	if (list_empty(&vt_list->list)) {
		vt_list->idle = VTIMER_MAX_SLICE;
		goto fire;
	}

	/* store the actual expire value */
	asm volatile ("STPT %0" : "=m" (vt_list->idle));

	/* If the CPU timer is negative we don't reprogram it,
	 * because we will get an interrupt almost immediately. */
	if (vt_list->idle & 1LL << 63)
		return;

	vt_list->offset += vt_list->to_expire - vt_list->idle;

	/* We cannot halt the CPU timer, so we write a value that
	 * nearly never expires (only after 71 years) and re-write
	 * the stored expire value if we continue the timer. */
 fire:
	set_vtimer(VTIMER_MAX_SLICE);
}
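
/*
 * Back-of-the-envelope check for the "71 years" above: VTIMER_MAX_SLICE
 * is on the order of 2^63 units of 2^-12 microseconds, i.e.
 * 2^51 microseconds ~ 2.25 * 10^9 seconds ~ 71.4 years.
 */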

/*
 * Sorted add to a list: the list is searched linearly until the first
 * event with a later expiry is found; the new timer is inserted in
 * front of it, or at the tail if no such event exists.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *event;

	list_for_each_entry(event, head, entry) {
		if (event->expires > timer->expires) {
			list_add_tail(&timer->entry, &event->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}
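
/*
 * For illustration: with queued expiry values [100, 200, 400], a timer
 * with expires == 300 ends up in front of the 400 entry. The comparison
 * is strict, so timers with equal expiry values keep FIFO order.
 */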

/*
 * Do the callback functions of expired vtimer events.
 * Called from within the interrupt handler.
 */
static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
{
	struct vtimer_queue *vt_list;
	struct vtimer_list *event, *tmp;
	void (*fn)(unsigned long, struct pt_regs*);
	unsigned long data;

	if (list_empty(cb_list))
		return;

	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());

	list_for_each_entry_safe(event, tmp, cb_list, entry) {
		fn = event->function;
		data = event->data;
		fn(data, regs);

		if (!event->interval)
			/* delete one shot timer */
			list_del_init(&event->entry);
		else {
			/* move interval timer back to list */
			spin_lock(&vt_list->lock);
			list_del_init(&event->entry);
			list_add_sorted(event, &vt_list->list);
			spin_unlock(&vt_list->lock);
		}
	}
}

/*
 * Handler for the virtual CPU timer.
 */
static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code)
{
	int cpu;
	__u64 next, delta;
	struct vtimer_queue *vt_list;
	struct vtimer_list *event, *tmp;
	struct list_head *ptr;
	/* the callback queue */
	struct list_head cb_list;

	INIT_LIST_HEAD(&cb_list);
	cpu = smp_processor_id();
	vt_list = &per_cpu(virt_cpu_timer, cpu);

	/* walk timer list, fire all expired events */
	spin_lock(&vt_list->lock);

	if (vt_list->to_expire < VTIMER_MAX_SLICE)
		vt_list->offset += vt_list->to_expire;

	list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
		if (event->expires > vt_list->offset)
			/* found first unexpired event, leave */
			break;

		/* re-charge interval timer, we have to add the offset */
		if (event->interval)
			event->expires = event->interval + vt_list->offset;

		/* move expired timer to the callback queue */
		list_move_tail(&event->entry, &cb_list);
	}
	spin_unlock(&vt_list->lock);
	do_callbacks(&cb_list, regs);

	/* next event is first in list */
	spin_lock(&vt_list->lock);
	if (!list_empty(&vt_list->list)) {
		ptr = vt_list->list.next;
		event = list_entry(ptr, struct vtimer_list, entry);
		next = event->expires - vt_list->offset;

		/* add the expired time from this interrupt handler
		 * and the callback functions
		 */
		asm volatile ("STPT %0" : "=m" (delta));
		delta = 0xffffffffffffffffLL - delta + 1;
		vt_list->offset += delta;
		next -= delta;
	} else {
		vt_list->offset = 0;
		next = VTIMER_MAX_SLICE;
	}
	spin_unlock(&vt_list->lock);
	set_vtimer(next);
}
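
/*
 * Note on the delta computation above: once the CPU timer has fired,
 * STPT stores a negative value, e.g. 0xfffffffffffffe00 for -512 units.
 * 0xffffffffffffffff - delta + 1 is the two's complement, here 512,
 * i.e. the time consumed by this handler and the callbacks.
 */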

void init_virt_timer(struct vtimer_list *timer)
{
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
	spin_lock_init(&timer->lock);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return (!list_empty(&timer->entry));
}

/*
 * this function should only run on the specified CPU
 */
static void internal_add_vtimer(struct vtimer_list *timer)
{
	unsigned long flags;
	__u64 done;
	struct vtimer_list *event;
	struct vtimer_queue *vt_list;

	vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vt_list->lock, flags);

	if (timer->cpu != smp_processor_id())
		printk("internal_add_vtimer: BUG, running on wrong CPU");

	/* if list is empty we only have to set the timer */
	if (list_empty(&vt_list->list)) {
		/* reset the offset, this may happen if the last timer was
		 * just deleted by mod_virt_timer and the interrupt
		 * didn't happen until here
		 */
		vt_list->offset = 0;
		goto fire;
	}

	/* read how much of the current slice has already expired */
	asm volatile ("STPT %0" : "=m" (done));

	/* calculate completed work */
	done = vt_list->to_expire - done + vt_list->offset;
	vt_list->offset = 0;

	list_for_each_entry(event, &vt_list->list, entry)
		event->expires -= done;

 fire:
	list_add_sorted(timer, &vt_list->list);

	/* get first element, which is the next vtimer slice */
	event = list_entry(vt_list->list.next, struct vtimer_list, entry);

	set_vtimer(event->expires);
	spin_unlock_irqrestore(&vt_list->lock, flags);
	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
	put_cpu();
}

static inline int prepare_vtimer(struct vtimer_list *timer)
{
	if (!timer->function) {
		printk("add_virt_timer: uninitialized timer\n");
		return -EINVAL;
	}

	if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
		printk("add_virt_timer: invalid timer expire value!\n");
		return -EINVAL;
	}

	if (vtimer_pending(timer)) {
		printk("add_virt_timer: timer pending\n");
		return -EBUSY;
	}

	timer->cpu = get_cpu();
	return 0;
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;

	if (prepare_vtimer(timer) < 0)
		return;

	timer->interval = 0;
	internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer);
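
/*
 * Usage sketch (vtimer_expired and the one-second expiry are made up
 * for illustration; expiry values are CPU timer units of 2^-12
 * microseconds):
 *
 *	static void vtimer_expired(unsigned long data, struct pt_regs *regs)
 *	{
 *		... called from the 0x1005 external interrupt ...
 *	}
 *
 *	static struct vtimer_list my_timer;
 *
 *	init_virt_timer(&my_timer);
 *	my_timer.function = vtimer_expired;
 *	my_timer.data = 0;
 *	my_timer.expires = 1000000ULL << 12;	(one second of cpu time)
 *	add_virt_timer(&my_timer);
 */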

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;

	if (prepare_vtimer(timer) < 0)
		return;

	timer->interval = timer->expires;
	internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer_periodic);
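
/*
 * With the sketch above, calling add_virt_timer_periodic(&my_timer)
 * instead copies expires into interval, and do_callbacks() then
 * re-queues the timer after every expiry instead of deleting it.
 */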

/*
 * If we change a pending timer the function must be called on the CPU
 * where the timer runs, e.g. by smp_call_function_on()
 *
 * The original mod_timer adds the timer if it is not pending. For
 * compatibility we do the same. The timer will be added on the current
 * CPU as a one-shot timer.
 *
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
	struct vtimer_queue *vt_list;
	unsigned long flags;
	int cpu;

	if (!timer->function) {
		printk("mod_virt_timer: uninitialized timer\n");
		return -EINVAL;
	}

	if (!expires || expires > VTIMER_MAX_SLICE) {
		printk("mod_virt_timer: invalid expire range\n");
		return -EINVAL;
	}

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && vtimer_pending(timer))
		return 1;

	cpu = get_cpu();
	vt_list = &per_cpu(virt_cpu_timer, cpu);

	/* disable interrupts before test if timer is pending */
	spin_lock_irqsave(&vt_list->lock, flags);

	/* if timer isn't pending add it on the current CPU */
	if (!vtimer_pending(timer)) {
		spin_unlock_irqrestore(&vt_list->lock, flags);
		/* we do not activate an interval timer with mod_virt_timer */
		timer->interval = 0;
		timer->expires = expires;
		timer->cpu = cpu;
		internal_add_vtimer(timer);
		return 0;
	}

	/* check if we run on the right CPU */
	if (timer->cpu != cpu) {
		printk("mod_virt_timer: running on wrong CPU, check your code\n");
		spin_unlock_irqrestore(&vt_list->lock, flags);
		put_cpu();
		return -EINVAL;
	}

	list_del_init(&timer->entry);
	timer->expires = expires;

	/* also change the interval if we have an interval timer */
	if (timer->interval)
		timer->interval = expires;

	/* the timer can't expire anymore so we can release the lock */
	spin_unlock_irqrestore(&vt_list->lock, flags);
	internal_add_vtimer(timer);
	return 1;
}
EXPORT_SYMBOL(mod_virt_timer);
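
/*
 * Usage sketch (my_timer as above):
 *
 *	rc = mod_virt_timer(&my_timer, 500000ULL << 12);
 *
 * rc == 1 means a pending timer was re-queued with the new expiry,
 * rc == 0 means it was not pending and has been added on the current
 * CPU as a one-shot timer; a negative value indicates a usage error.
 */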

/*
 * delete a virtual timer
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;
	struct vtimer_queue *vt_list;

	/* check if timer is pending */
	if (!vtimer_pending(timer))
		return 0;

	vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vt_list->lock, flags);

	/* we don't interrupt a running timer, just let it expire! */
	list_del_init(&timer->entry);

	/* last timer removed */
	if (list_empty(&vt_list->list)) {
		vt_list->to_expire = 0;
		vt_list->offset = 0;
	}

	spin_unlock_irqrestore(&vt_list->lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);
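
/*
 * Usage sketch: del_virt_timer(&my_timer) returns 1 if the timer was
 * still queued and has been removed, or 0 if it was not pending, e.g.
 * a one-shot timer that has already fired.
 */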

/*
 * Start the virtual CPU timer on the current CPU.
 */
void init_cpu_vtimer(void)
{
	struct vtimer_queue *vt_list;
	unsigned long cr0;

	/* kick the virtual timer */
	S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
	S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
	asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
	asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));

	/* enable cpu timer interrupts (subclass mask in control register 0) */
	__ctl_store(cr0, 0, 0);
	cr0 |= 0x400;
	__ctl_load(cr0, 0, 0);

	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
	INIT_LIST_HEAD(&vt_list->list);
	spin_lock_init(&vt_list->lock);
	vt_list->to_expire = 0;
	vt_list->offset = 0;
	vt_list->idle = 0;
}

static int vtimer_idle_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_IDLE:
		stop_cpu_timer();
		break;
	case CPU_NOT_IDLE:
		start_cpu_timer();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block vtimer_idle_nb = {
	.notifier_call = vtimer_idle_notify,
};

void __init vtime_init(void)
{
	/* request the cpu timer external interrupt */
	if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
					      &ext_int_info_timer) != 0)
		panic("Couldn't request external interrupt 0x1005");

	if (register_idle_notifier(&vtimer_idle_nb))
		panic("Couldn't register idle notifier");

	init_cpu_vtimer();
}