/* sun4v_ivec.S: Sun4v interrupt vector handling.
 *
 * Copyright (C) 2006 <davem@davemloft.net>
 */
6 #include <asm/cpudata.h>
7 #include <asm/intr_queue.h>
/* sun4v CPU mondo (cross-call) queue handler.  Dequeues one 64-byte
 * entry from this cpu's CPU mondo interrupt queue and collects the
 * handler PC and arguments for dispatch.
 *
 * NOTE(review): this is an elided listing -- each line keeps its
 * original file line number, and gaps in that numbering mark
 * instructions not visible here (the entry label, the head/tail cmp
 * feeding the be,pn, branch delay slots, the %g2 advances between the
 * ldxa loads, the QMASK wrap of the new head, and the final jmpl
 * dispatch).  Comments below describe only what is visible.
 */
14 /* Head offset in %g2, tail offset in %g4.
15 * If they are the same, no work.
 */
17 mov INTRQ_CPU_MONDO_HEAD, %g2
18 ldxa [%g2] ASI_QUEUE, %g2
19 mov INTRQ_CPU_MONDO_TAIL, %g4
20 ldxa [%g4] ASI_QUEUE, %g4
/* head == tail => queue empty (the cmp is elided from this view). */
22 be,pn %xcc, sun4v_cpu_mondo_queue_empty
25 /* Get &trap_block[smp_processor_id()] into %g4. */
/* The scratchpad register points at this cpu's trap_block fault_info
 * member; subtracting its offset yields the trap_block base.
 */
26 ldxa [%g0] ASI_SCRATCHPAD, %g4
27 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
29 /* Get CPU mondo queue base phys address into %g7. */
30 ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
32 /* Now get the cross-call arguments and handler PC, same
35 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
36 * high half is context arg to MMU flushes, into %g5
37 * 2nd 64-bit word: 64-bit arg, load into %g1
38 * 3rd 64-bit word: 64-bit arg, load into %g7
 */
40 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g3
43 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
46 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g7
/* %g2 was apparently advanced 0x8 after each of the first two loads
 * (elided insns -- the 0x40 - 0x8 - 0x8 below only makes sense that
 * way); this nets the head to old head + 0x40, one queue entry.
 */
47 add %g2, 0x40 - 0x8 - 0x8, %g2
49 /* Update queue head pointer. */
/* The new head is wrapped with the queue-size mask loaded here (the
 * masking instruction itself is elided from this listing) before
 * being stored back to the HEAD queue register.
 */
50 lduw [%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
53 mov INTRQ_CPU_MONDO_HEAD, %g4
54 stxa %g2, [%g4] ASI_QUEUE
/* Empty CPU mondo queue: nothing to do (the retry that exits the trap
 * is elided from this listing).
 */
60 sun4v_cpu_mondo_queue_empty:
/* sun4v device mondo queue handler (entry label elided).  Dequeues a
 * device interrupt vector, links its ivector_table bucket onto this
 * cpu's __irq_work list, and raises the device-IRQ softint so the
 * real work runs at PIL_DEVICE_IRQ.
 */
64 /* Head offset in %g2, tail offset in %g4. */
65 mov INTRQ_DEVICE_MONDO_HEAD, %g2
66 ldxa [%g2] ASI_QUEUE, %g2
67 mov INTRQ_DEVICE_MONDO_TAIL, %g4
68 ldxa [%g4] ASI_QUEUE, %g4
/* head == tail => queue empty (cmp elided from this view). */
70 be,pn %xcc, sun4v_dev_mondo_queue_empty
73 /* Get &trap_block[smp_processor_id()] into %g4. */
74 ldxa [%g0] ASI_SCRATCHPAD, %g4
75 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
77 /* Get DEV mondo queue base phys address into %g5. */
78 ldx [%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
80 /* Load IVEC into %g3. */
81 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
84 /* XXX There can be a full 64-byte block of data here.
85 * XXX This is how we can get at MSI vector data.
86 * XXX Current we do not capture this, but when we do we'll
87 * XXX need to add a 64-byte storage area in the struct ino_bucket
88 * XXX or the struct irq_desc.
 */
91 /* Update queue head pointer, this frees up some registers. */
/* Head advance and QMASK wrap instructions are elided here; only the
 * mask load and the store of the new head are visible.
 */
92 lduw [%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
95 mov INTRQ_DEVICE_MONDO_HEAD, %g4
96 stxa %g2, [%g4] ASI_QUEUE
99 /* Get &__irq_work[smp_processor_id()] into %g1. */
100 TRAP_LOAD_IRQ_WORK(%g1, %g4)
102 /* Get &ivector_table[IVEC] into %g4. */
/* The IVEC-to-byte-offset scaling and the final add of that offset
 * into %g4 are elided; only the table base construction is visible.
 */
103 sethi %hi(ivector_table), %g4
105 or %g4, %lo(ivector_table), %g4
108 /* Insert ivector_table[] entry into __irq_work[] queue. */
/* Push the bucket onto the head of the singly-linked irq_work list. */
109 lduw [%g1], %g2 /* g2 = irq_work(cpu) */
110 stw %g2, [%g4 + 0x00] /* bucket->irq_chain = g2 */
111 stw %g4, [%g1] /* irq_work(cpu) = bucket */
113 /* Signal the interrupt by setting (1 << pil) in %softint. */
114 wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint
/* Empty device mondo queue: nothing to do (exit retry elided). */
116 sun4v_dev_mondo_queue_empty:
/* sun4v resumable-error mondo queue handler (entry label elided).
 * Copies one 64-byte error report from the hypervisor queue into the
 * per-cpu kernel buffer, advances the queue head, then traps into C
 * (sun4v_resum_error) via etrap_irq.
 * Elided in this listing: the head/tail cmp, branch delay slots, the
 * save of the entry offset into %g1, the 0x8 adds between each
 * ldxa/stxa pair, the QMASK wrap, pstate setup before etrap, and the
 * #endif closing the CONFIG_TRACE_IRQFLAGS block.
 */
120 /* Head offset in %g2, tail offset in %g4. */
121 mov INTRQ_RESUM_MONDO_HEAD, %g2
122 ldxa [%g2] ASI_QUEUE, %g2
123 mov INTRQ_RESUM_MONDO_TAIL, %g4
124 ldxa [%g4] ASI_QUEUE, %g4
/* head == tail => nothing pending (cmp elided from this view). */
126 be,pn %xcc, sun4v_res_mondo_queue_empty
129 /* Get &trap_block[smp_processor_id()] into %g3. */
130 ldxa [%g0] ASI_SCRATCHPAD, %g3
131 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
133 /* Get RES mondo queue base phys address into %g5. */
134 ldx [%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5
136 /* Get RES kernel buffer base phys address into %g7. */
137 ldx [%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7
139 /* If the first word is non-zero, queue is full. */
/* The kernel-buffer slot at this offset still holds an unconsumed
 * report; go log the overflow instead of overwriting it.
 */
140 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
141 brnz,pn %g1, sun4v_res_mondo_queue_full
/* Queue-size mask, used later to wrap the advanced head offset. */
144 lduw [%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4
146 /* Remember this entry's offset in %g1. */
149 /* Copy 64-byte queue entry into kernel buffer. */
/* Eight 8-byte physical-address load/store pairs (%g5 = queue base,
 * %g7 = kernel buffer base); the %g2 advance between pairs is elided.
 */
150 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
151 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
153 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
154 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
156 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
157 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
159 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
160 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
162 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
163 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
165 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
166 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
168 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
169 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
171 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
172 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
175 /* Update queue head pointer. */
178 mov INTRQ_RESUM_MONDO_HEAD, %g4
179 stxa %g2, [%g4] ASI_QUEUE
182 /* Disable interrupts and save register state so we can call
183 * C code. The etrap handling will leave %g4 in %l4 for us
 */
189 ba,pt %xcc, etrap_irq
191 #ifdef CONFIG_TRACE_IRQFLAGS
/* Tell lockdep hardirqs are off before running any traced C code
 * (the matching #endif is elided from this listing).
 */
192 call trace_hardirqs_off
/* Hand the saved pt_regs to the C-level resumable error handler. */
196 add %sp, PTREGS_OFF, %o0
197 call sun4v_resum_error
200 /* Return from trap. */
201 ba,pt %xcc, rtrap_irq
/* Empty resumable-error queue: nothing to do (exit retry elided). */
204 sun4v_res_mondo_queue_empty:
207 sun4v_res_mondo_queue_full:
208 /* The queue is full, consolidate our damage by setting
209 * the head equal to the tail. We'll just trap again otherwise.
210 * Call C code to log the event.
 */
/* %g4 still holds the tail offset read at handler entry; storing it
 * as the new head discards all pending entries at once.
 */
212 mov INTRQ_RESUM_MONDO_HEAD, %g2
213 stxa %g4, [%g2] ASI_QUEUE
/* Trap into C to record the overflow (pstate setup and the #endif of
 * the CONFIG_TRACE_IRQFLAGS block are elided from this listing).
 */
218 ba,pt %xcc, etrap_irq
220 #ifdef CONFIG_TRACE_IRQFLAGS
221 call trace_hardirqs_off
/* add in the call's delay slot sets %o0 = pt_regs pointer. */
224 call sun4v_resum_overflow
225 add %sp, PTREGS_OFF, %o0
227 ba,pt %xcc, rtrap_irq
/* sun4v non-resumable-error mondo queue handler (entry label elided).
 * Mirrors the resumable handler above: copies one 64-byte report into
 * the per-cpu NONRESUM kernel buffer, advances the head, and calls
 * sun4v_nonresum_error via etrap_irq.
 * Elided in this listing: the head/tail cmp, branch delay slots, the
 * save of the entry offset into %g1, the 0x8 adds between ldxa/stxa
 * pairs, the QMASK wrap, pstate setup, and the #endif for the
 * CONFIG_TRACE_IRQFLAGS block.
 */
231 /* Head offset in %g2, tail offset in %g4. */
232 mov INTRQ_NONRESUM_MONDO_HEAD, %g2
233 ldxa [%g2] ASI_QUEUE, %g2
234 mov INTRQ_NONRESUM_MONDO_TAIL, %g4
235 ldxa [%g4] ASI_QUEUE, %g4
/* head == tail => nothing pending (cmp elided from this view). */
237 be,pn %xcc, sun4v_nonres_mondo_queue_empty
240 /* Get &trap_block[smp_processor_id()] into %g3. */
241 ldxa [%g0] ASI_SCRATCHPAD, %g3
242 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
244 /* Get NONRESUM mondo queue base phys address into %g5. */
245 ldx [%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5
247 /* Get NONRESUM kernel buffer base phys address into %g7. */
248 ldx [%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7
250 /* If the first word is non-zero, queue is full. */
/* Unconsumed report still in the kernel buffer slot => overflow. */
251 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
252 brnz,pn %g1, sun4v_nonres_mondo_queue_full
/* Queue-size mask, used later to wrap the advanced head offset. */
255 lduw [%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4
257 /* Remember this entry's offset in %g1. */
260 /* Copy 64-byte queue entry into kernel buffer. */
/* Eight 8-byte physical-address load/store pairs (%g5 = queue base,
 * %g7 = kernel buffer base); the %g2 advance between pairs is elided.
 */
261 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
262 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
264 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
265 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
267 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
268 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
270 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
271 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
273 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
274 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
276 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
277 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
279 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
280 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
282 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
283 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
286 /* Update queue head pointer. */
289 mov INTRQ_NONRESUM_MONDO_HEAD, %g4
290 stxa %g2, [%g4] ASI_QUEUE
293 /* Disable interrupts and save register state so we can call
294 * C code. The etrap handling will leave %g4 in %l4 for us
 */
300 ba,pt %xcc, etrap_irq
302 #ifdef CONFIG_TRACE_IRQFLAGS
/* Tell lockdep hardirqs are off before running any traced C code
 * (the matching #endif is elided from this listing).
 */
303 call trace_hardirqs_off
/* Hand the saved pt_regs to the C-level non-resumable handler. */
307 add %sp, PTREGS_OFF, %o0
308 call sun4v_nonresum_error
311 /* Return from trap. */
312 ba,pt %xcc, rtrap_irq
/* Empty non-resumable-error queue: nothing to do (exit retry elided). */
315 sun4v_nonres_mondo_queue_empty:
318 sun4v_nonres_mondo_queue_full:
319 /* The queue is full, consolidate our damage by setting
320 * the head equal to the tail. We'll just trap again otherwise.
321 * Call C code to log the event.
 */
/* %g4 still holds the tail offset read at handler entry; storing it
 * as the new head discards all pending entries at once.
 */
323 mov INTRQ_NONRESUM_MONDO_HEAD, %g2
324 stxa %g4, [%g2] ASI_QUEUE
/* Trap into C to record the overflow (pstate setup and the #endif of
 * the CONFIG_TRACE_IRQFLAGS block are elided from this listing).
 */
329 ba,pt %xcc, etrap_irq
331 #ifdef CONFIG_TRACE_IRQFLAGS
332 call trace_hardirqs_off
/* add in the call's delay slot sets %o0 = pt_regs pointer. */
335 call sun4v_nonresum_overflow
336 add %sp, PTREGS_OFF, %o0
338 ba,pt %xcc, rtrap_irq