Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
a23ba435 | 2 | * arch/sh/kernel/time_64.c |
1da177e4 LT |
3 | * |
4 | * Copyright (C) 2000, 2001 Paolo Alberelli | |
6c7e2a55 | 5 | * Copyright (C) 2003 - 2007 Paul Mundt |
1da177e4 LT |
6 | * Copyright (C) 2003 Richard Curnow |
7 | * | |
8 | * Original TMU/RTC code taken from sh version. | |
9 | * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka | |
10 | * Some code taken from i386 version. | |
11 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds | |
a23ba435 PM |
12 | * |
13 | * This file is subject to the terms and conditions of the GNU General Public | |
14 | * License. See the file "COPYING" in the main directory of this archive | |
15 | * for more details. | |
1da177e4 | 16 | */ |
1da177e4 LT |
17 | #include <linux/errno.h> |
18 | #include <linux/rwsem.h> | |
19 | #include <linux/sched.h> | |
20 | #include <linux/kernel.h> | |
21 | #include <linux/param.h> | |
22 | #include <linux/string.h> | |
23 | #include <linux/mm.h> | |
24 | #include <linux/interrupt.h> | |
25 | #include <linux/time.h> | |
26 | #include <linux/delay.h> | |
27 | #include <linux/init.h> | |
28 | #include <linux/profile.h> | |
29 | #include <linux/smp.h> | |
4940fb44 | 30 | #include <linux/module.h> |
4f3a36a7 | 31 | #include <linux/bcd.h> |
6c7e2a55 PM |
32 | #include <linux/timex.h> |
33 | #include <linux/irq.h> | |
b4eaa1cc | 34 | #include <linux/io.h> |
6c7e2a55 | 35 | #include <linux/platform_device.h> |
b4eaa1cc PM |
36 | #include <asm/cpu/registers.h> /* required by inline __asm__ stmt. */ |
37 | #include <asm/cpu/irq.h> | |
38 | #include <asm/addrspace.h> | |
1da177e4 LT |
39 | #include <asm/processor.h> |
40 | #include <asm/uaccess.h> | |
1da177e4 | 41 | #include <asm/delay.h> |
1da177e4 LT |
42 | |
/* TMU register init values written in time_init(). */
#define TMU_TOCR_INIT	0x00	/* timer output control -- meaning per SH-5 TMU manual, TODO confirm */
#define TMU0_TCR_INIT	0x0020	/* TMU0 control; presumably enables underflow interrupt -- TODO confirm */
#define TMU_TSTR_INIT	1	/* start TMU0 counting */
#define TMU_TSTR_OFF	0	/* stop TMU0 counting */

/* Real Time Clock */
#define	RTC_BLOCK_OFF	0x01040000
#define RTC_BASE	PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF
#define RTC_RCR1_CIE	0x10	/* Carry Interrupt Enable */
#define RTC_RCR1	(rtc_base + 0x38)

/* Clock, Power and Reset Controller */
#define	CPRC_BLOCK_OFF	0x01010000
#define CPRC_BASE	PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF

#define FRQCR		(cprc_base+0x0)		/* frequency control (ifc/bfc/pfc fields) */
#define WTCSR		(cprc_base+0x0018)	/* watchdog timer control/status */
#define STBCR		(cprc_base+0x0030)	/* standby control */

/* Time Management Unit: three channels at 0xc-byte stride after an 8-byte header */
#define	TMU_BLOCK_OFF	0x01020000
#define TMU_BASE	PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
#define TMU0_BASE	tmu_base + 0x8 + (0xc * 0x0)
#define TMU1_BASE	tmu_base + 0x8 + (0xc * 0x1)
#define TMU2_BASE	tmu_base + 0x8 + (0xc * 0x2)

#define TMU_TOCR	tmu_base+0x0	/* Byte access */
#define TMU_TSTR	tmu_base+0x4	/* Byte access */

#define TMU0_TCOR	TMU0_BASE+0x0	/* Long access */
#define TMU0_TCNT	TMU0_BASE+0x4	/* Long access */
#define TMU0_TCR	TMU0_BASE+0x8	/* Word access */

/* Microseconds per tick, derived from the current tick length in ns */
#define TICK_SIZE (tick_nsec / 1000)
77 | ||
1da177e4 LT |
/* Virtual base addresses of the peripheral blocks, set up in time_init(). */
static unsigned long tmu_base, rtc_base;
unsigned long cprc_base;	/* non-static: presumably referenced elsewhere -- TODO confirm */

/* Variables to allow interpolation of time of day to resolution better than a
 * jiffy. */

/* This is effectively protected by xtime_lock */
static unsigned long ctc_last_interrupt;	/* CTC value sampled in the timer irq */
static unsigned long long usecs_per_jiffy = 1000000/HZ; /* Approximation */

#define CTC_JIFFY_SCALE_SHIFT 40

/* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy; computed in time_init() */
static unsigned long long scaled_recip_ctc_ticks_per_jiffy;
92 | ||
93 | /* Estimate number of microseconds that have elapsed since the last timer tick, | |
0a354775 | 94 | by scaling the delta that has occurred in the CTC register. |
1da177e4 LT |
95 | |
96 | WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at | |
97 | the CPU clock rate. If the CPU sleeps, the CTC stops counting. Bear this | |
98 | in mind if enabling SLEEP_WORKS in process.c. In that case, this algorithm | |
99 | probably needs to use TMU.TCNT0 instead. This will work even if the CPU is | |
100 | sleeping, though will be coarser. | |
101 | ||
102 | FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime | |
103 | is running or if the freq or tick arguments of adjtimex are modified after | |
104 | we have calibrated the scaling factor? This will result in either a jump at | |
105 | the end of a tick period, or a wrap backwards at the start of the next one, | |
106 | if the application is reading the time of day often enough. I think we | |
107 | ought to do better than this. For this reason, usecs_per_jiffy is left | |
108 | separated out in the calculation below. This allows some future hook into | |
109 | the adjtime-related stuff in kernel/timer.c to remove this hazard. | |
110 | ||
111 | */ | |
112 | ||
/*
 * usecs_since_tick - estimate microseconds elapsed since the last timer tick.
 *
 * Computes (ctc_delta * usecs_per_jiffy * scaled_recip_ctc_ticks_per_jiffy)
 * >> CTC_JIFFY_SCALE_SHIFT with three 32x32->64 hardware multiplies, i.e.
 * ctc_delta * usecs_per_jiffy / ctc_ticks_per_jiffy without a division.
 * Caller must hold xtime_lock, which protects ctc_last_interrupt.
 */
static unsigned long usecs_since_tick(void)
{
	unsigned long long current_ctc;
	long ctc_ticks_since_interrupt;
	unsigned long long ull_ctc_ticks_since_interrupt;
	unsigned long result;

	unsigned long long mul1_out;
	unsigned long long mul1_out_high;
	unsigned long long mul2_out_low, mul2_out_high;

	/* Read CTC register */
	asm ("getcon cr62, %0" : "=r" (current_ctc));
	/* Note, the CTC counts down on each CPU clock, not up.
	   Note(2), use long type to get correct wraparound arithmetic when
	   the counter crosses zero. */
	ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc;
	ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt;

	/* Inline assembly to do 32x32x32->64 multiplier */
	asm volatile ("mulu.l %1, %2, %0" :
		"=r" (mul1_out) :
		"r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy));

	/* Split the 64-bit intermediate so the next stage can be done as two
	   32x32->64 multiplies whose results are recombined below. */
	mul1_out_high = mul1_out >> 32;

	asm volatile ("mulu.l %1, %2, %0" :
		"=r" (mul2_out_low) :
		"r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy));

#if 1
	asm volatile ("mulu.l %1, %2, %0" :
		"=r" (mul2_out_high) :
		"r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy));
#endif

	/* Recombine and drop the fixed-point scale factor. */
	result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT);

	return result;
}
153 | ||
154 | void do_gettimeofday(struct timeval *tv) | |
155 | { | |
156 | unsigned long flags; | |
157 | unsigned long seq; | |
158 | unsigned long usec, sec; | |
159 | ||
160 | do { | |
161 | seq = read_seqbegin_irqsave(&xtime_lock, flags); | |
162 | usec = usecs_since_tick(); | |
1da177e4 LT |
163 | sec = xtime.tv_sec; |
164 | usec += xtime.tv_nsec / 1000; | |
165 | } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); | |
166 | ||
167 | while (usec >= 1000000) { | |
168 | usec -= 1000000; | |
169 | sec++; | |
170 | } | |
171 | ||
172 | tv->tv_sec = sec; | |
173 | tv->tv_usec = usec; | |
174 | } | |
175 | ||
/*
 * Set the wall-clock time, compensating for the fraction of the current
 * tick that has already elapsed, and adjust wall_to_monotonic by the
 * opposite amount so the monotonic clock is unaffected.
 * Returns 0 on success, -EINVAL for an out-of-range nanosecond value.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/*
	 * This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time. Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	nsec -= 1000 * usecs_since_tick();

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* Drop accumulated NTP state; it referred to the old timebase. */
	ntp_clear();
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
1da177e4 | 206 | |
6c7e2a55 PM |
207 | /* Dummy RTC ops */ |
208 | static void null_rtc_get_time(struct timespec *tv) | |
1da177e4 | 209 | { |
6c7e2a55 PM |
210 | tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0); |
211 | tv->tv_nsec = 0; | |
212 | } | |
1da177e4 | 213 | |
6c7e2a55 PM |
214 | static int null_rtc_set_time(const time_t secs) |
215 | { | |
216 | return 0; | |
1da177e4 LT |
217 | } |
218 | ||
6c7e2a55 PM |
/* Board/platform code may repoint these at real RTC accessors; they
 * default to the dummy implementations above. */
void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;

/* last time the RTC clock got updated */
static long last_rtc_update;
1da177e4 LT |
224 | |
225 | /* | |
226 | * timer_interrupt() needs to keep up the real-time clock, | |
227 | * as well as call the "do_timer()" routine every clocktick | |
228 | */ | |
a226d33a | 229 | static inline void do_timer_interrupt(void) |
1da177e4 LT |
230 | { |
231 | unsigned long long current_ctc; | |
960c65e8 PZ |
232 | |
233 | if (current->pid) | |
234 | profile_tick(CPU_PROFILING); | |
235 | ||
236 | /* | |
237 | * Here we are in the timer irq handler. We just have irqs locally | |
238 | * disabled but we don't know if the timer_bh is running on the other | |
239 | * CPU. We need to avoid to SMP race with it. NOTE: we don' t need | |
240 | * the irq version of write_lock because as just said we have irq | |
241 | * locally disabled. -arca | |
242 | */ | |
243 | write_lock(&xtime_lock); | |
1da177e4 LT |
244 | asm ("getcon cr62, %0" : "=r" (current_ctc)); |
245 | ctc_last_interrupt = (unsigned long) current_ctc; | |
246 | ||
3171a030 | 247 | do_timer(1); |
1da177e4 LT |
248 | |
249 | #ifdef CONFIG_HEARTBEAT | |
b4eaa1cc PM |
250 | if (sh_mv.mv_heartbeat != NULL) |
251 | sh_mv.mv_heartbeat(); | |
1da177e4 LT |
252 | #endif |
253 | ||
254 | /* | |
255 | * If we have an externally synchronized Linux clock, then update | |
256 | * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be | |
257 | * called as close as possible to 500 ms before the new second starts. | |
258 | */ | |
b149ee22 | 259 | if (ntp_synced() && |
1da177e4 LT |
260 | xtime.tv_sec > last_rtc_update + 660 && |
261 | (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && | |
262 | (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { | |
6c7e2a55 | 263 | if (rtc_sh_set_time(xtime.tv_sec) == 0) |
1da177e4 LT |
264 | last_rtc_update = xtime.tv_sec; |
265 | else | |
6c7e2a55 PM |
266 | /* do it again in 60 s */ |
267 | last_rtc_update = xtime.tv_sec - 600; | |
1da177e4 | 268 | } |
960c65e8 PZ |
269 | write_unlock(&xtime_lock); |
270 | ||
271 | #ifndef CONFIG_SMP | |
272 | update_process_times(user_mode(get_irq_regs())); | |
273 | #endif | |
1da177e4 LT |
274 | } |
275 | ||
/*
 * This is the same as the above, except we _also_ save the current
 * Time Stamp Counter value at the time of the timer interrupt, so that
 * we later on can estimate the time of day more exactly.
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	unsigned long timer_status;

	/* Clear UNF bit */
	timer_status = ctrl_inw(TMU0_TCR);
	timer_status &= ~0x100;		/* bit 8 = UNF (underflow flag) */
	ctrl_outw(timer_status, TMU0_TCR);

	do_timer_interrupt();

	return IRQ_HANDLED;
}
294 | ||
1da177e4 LT |
295 | |
/*
 * Boot-time calibration of the CPU clock frequency.  Counts how far the
 * CTC (which decrements at the CPU clock rate) falls during one RTC
 * carry-interrupt period; sh64_rtc_interrupt() sets r3 non-zero to
 * terminate the wait loop in the asm block below.
 */
static __init unsigned int get_cpu_hz(void)
{
	unsigned int count;
	unsigned long __dummy;
	unsigned long ctc_val_init, ctc_val;

	/*
	** Regardless the toolchain, force the compiler to use the
	** arbitrary register r3 as a clock tick counter.
	** NOTE: r3 must be in accordance with sh64_rtc_interrupt()
	*/
	register unsigned long long __rtc_irq_flag __asm__ ("r3");

	local_irq_enable();
	/* Spin until the RTC register at rtc_base reads zero, so the
	   measurement starts on a counter boundary -- TODO confirm which
	   register this is against the RTC manual. */
	do {} while (ctrl_inb(rtc_base) != 0);
	ctrl_outb(RTC_RCR1_CIE, RTC_RCR1); /* Enable carry interrupt */

	/*
	 * r3 is arbitrary. CDC does not support "=z".
	 */
	ctc_val_init = 0xffffffff;
	ctc_val = ctc_val_init;

	/* Load CTC, busy-wait until the carry irq handler makes r3 non-zero,
	   then read CTC back; the drop is CPU cycles per carry period. */
	asm volatile("gettr	tr0, %1\n\t"
		     "putcon	%0, " __CTC "\n\t"
		     "and	%2, r63, %2\n\t"
		     "pta	$+4, tr0\n\t"
		     "beq/l	%2, r63, tr0\n\t"
		     "ptabs	%1, tr0\n\t"
		     "getcon	" __CTC ", %0\n\t"
		     : "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag)
		     : "0" (0));
	local_irq_disable();
	/*
	 * SH-3:
	 * CPU clock = 4 stages * loop
	 * tst rm,rm     if id ex
	 * bt/s 1b       if id ex
	 * add #1,rd     if id ex
	 * (if) pipe line stole
	 * tst rm,rm     if id ex
	 * ....
	 *
	 *
	 * SH-4:
	 * CPU clock = 6 stages * loop
	 * I don't know why.
	 * ....
	 *
	 * SH-5:
	 * Use CTC register to count.  This approach returns the right value
	 * even if the I-cache is disabled (e.g. whilst debugging.)
	 *
	 */

	count = ctc_val_init - ctc_val; /* CTC counts down */

	/*
	 * This really is count by the number of clock cycles
	 * by the ratio between a complete R64CNT
	 * wrap-around (128) and CUI interrupt being raised (64).
	 */
	return count*2;
}
360 | ||
/*
 * RTC carry interrupt handler, only live while get_cpu_hz() runs.
 * Disables further carry interrupts and flags r3 in the interrupted
 * context so the calibration wait loop exits.
 */
static irqreturn_t sh64_rtc_interrupt(int irq, void *dev_id)
{
	struct pt_regs *regs = get_irq_regs();

	ctrl_outb(0, RTC_RCR1);	/* Disable Carry Interrupts */
	regs->regs[3] = 1;	/* Using r3 */

	return IRQ_HANDLED;
}
370 | ||
948d12cb TG |
/* Periodic tick interrupt, driven by TMU0. */
static struct irqaction irq0  = {
	.handler = timer_interrupt,
	.flags = IRQF_DISABLED,
	.mask = CPU_MASK_NONE,
	.name = "timer",
};
/* RTC carry interrupt, used transiently by get_cpu_hz() and then freed. */
static struct irqaction irq1  = {
	.handler = sh64_rtc_interrupt,
	.flags = IRQF_DISABLED,
	.mask = CPU_MASK_NONE,
	.name = "rtc",
};
1da177e4 LT |
383 | |
/*
 * Boot-time timer setup: map the TMU/RTC/CPRC blocks, read the initial
 * time from the RTC, calibrate the CPU clock via the RTC carry
 * interrupt, derive the master/bus/module clocks from FRQCR, and start
 * TMU0 as the periodic tick source.
 */
void __init time_init(void)
{
	unsigned int cpu_clock, master_clock, bus_clock, module_clock;
	unsigned long interval;
	unsigned long frqcr, ifc, pfc;
	static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
#define bfc_table ifc_table	/* Same */
#define pfc_table ifc_table	/* Same */

	tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
	if (!tmu_base) {
		panic("Unable to remap TMU\n");
	}

	rtc_base = onchip_remap(RTC_BASE, 1024, "RTC");
	if (!rtc_base) {
		panic("Unable to remap RTC\n");
	}

	cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
	if (!cprc_base) {
		panic("Unable to remap CPRC\n");
	}

	/* Seed xtime from the RTC (or the dummy 2000-01-01 fallback). */
	rtc_sh_get_time(&xtime);

	setup_irq(TIMER_IRQ, &irq0);
	setup_irq(RTC_IRQ, &irq1);

	/* Check how fast it is.. */
	cpu_clock = get_cpu_hz();

	/* Note careful order of operations to maintain reasonable precision and avoid overflow. */
	scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ));

	/* The RTC carry irq was only needed for calibration. */
	free_irq(RTC_IRQ, NULL);

	printk("CPU clock: %d.%02dMHz\n",
	       (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
	{
		unsigned short bfc;
		/* Decode the clock divider fields from FRQCR. */
		frqcr = ctrl_inl(FRQCR);
		ifc  = ifc_table[(frqcr>> 6) & 0x0007];
		bfc  = bfc_table[(frqcr>> 3) & 0x0007];
		pfc  = pfc_table[(frqcr>> 12) & 0x0007];
		master_clock = cpu_clock * ifc;
		bus_clock = master_clock/bfc;
	}

	printk("Bus clock: %d.%02dMHz\n",
	       (bus_clock/1000000), (bus_clock % 1000000)/10000);
	module_clock = master_clock/pfc;
	printk("Module clock: %d.%02dMHz\n",
	       (module_clock/1000000), (module_clock % 1000000)/10000);
	/* TMU input presumably runs at module_clock/4 -- TODO confirm,
	   hence interval counts for one tick at HZ. */
	interval = (module_clock/(HZ*4));

	printk("Interval = %ld\n", interval);

	current_cpu_data.cpu_clock    = cpu_clock;
	current_cpu_data.master_clock = master_clock;
	current_cpu_data.bus_clock    = bus_clock;
	current_cpu_data.module_clock = module_clock;

	/* Start TMU0 */
	ctrl_outb(TMU_TSTR_OFF, TMU_TSTR);
	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
	ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
	ctrl_outl(interval, TMU0_TCOR);
	ctrl_outl(interval, TMU0_TCNT);
	ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
}
455 | ||
/*
 * Put the CPU into deep standby via STBCR after disabling the watchdog.
 * Optionally writes "Halted. " to the alphanumeric display first.
 * Does not return; an unexpected wakeup panics.
 */
void enter_deep_standby(void)
{
	/* Disable watchdog timer */
	ctrl_outl(0xa5000000, WTCSR);	/* 0xa5 in the top byte is presumably the write-enable key -- TODO confirm */
	/* Configure deep standby on sleep */
	ctrl_outl(0x03, STBCR);

#ifdef CONFIG_SH_ALPHANUMERIC
	{
		extern void mach_alphanum(int position, unsigned char value);
		extern void mach_alphanum_brightness(int setting);
		char halted[] = "Halted. ";
		int i;
		mach_alphanum_brightness(6); /* dimmest setting above off */
		for (i=0; i<8; i++) {
			mach_alphanum(i, halted[i]);
		}
		asm __volatile__ ("synco");
	}
#endif

	/* Sleep; the nops pad the pipeline after wakeup before the panic. */
	asm __volatile__ ("sleep");
	asm __volatile__ ("synci");
	asm __volatile__ ("nop");
	asm __volatile__ ("nop");
	asm __volatile__ ("nop");
	asm __volatile__ ("nop");
	panic("Unexpected wakeup!\n");
}
6c7e2a55 PM |
485 | |
/* Resources handed to the generic "sh-rtc" platform driver. */
static struct resource rtc_resources[] = {
	[0] = {
		/* RTC base, filled in by rtc_init */
		.flags	= IORESOURCE_IO,
	},
	[1] = {
		/* Period IRQ */
		.start	= IRQ_PRI,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		/* Carry IRQ */
		.start	= IRQ_CUI,
		.flags	= IORESOURCE_IRQ,
	},
	[3] = {
		/* Alarm IRQ */
		.start	= IRQ_ATI,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device rtc_device = {
	.name		= "sh-rtc",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(rtc_resources),
	.resource	= rtc_resources,
};
514 | ||
515 | static int __init rtc_init(void) | |
516 | { | |
517 | rtc_resources[0].start = rtc_base; | |
518 | rtc_resources[0].end = rtc_resources[0].start + 0x58 - 1; | |
519 | ||
520 | return platform_device_register(&rtc_device); | |
521 | } | |
522 | device_initcall(rtc_init); |