/* sun4v_ivec.S: Sun4v interrupt vector handling.
 *
 * Copyright (C) 2006 <davem@davemloft.net>
 */

#include <asm/cpudata.h>
#include <asm/intr_queue.h>
#include <asm/pil.h>

	.text
	.align	32

sun4v_cpu_mondo:
	/* Head offset in %g2, tail offset in %g4.
	 * If they are the same, no work.
	 */
	mov	INTRQ_CPU_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_CPU_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
	 nop
	/* Get &trap_block[smp_processor_id()] into %g4.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get CPU mondo queue base phys address into %g7.  */
	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

	/* Now get the cross-call arguments and handler PC, same
	 * layout as sun4u:
	 *
	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
	 *                  high half is context arg to MMU flushes, into %g5
	 * 2nd 64-bit word: 64-bit arg, load into %g1
	 * 3rd 64-bit word: 64-bit arg, load into %g7
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x8, %g2
	srlx	%g3, 32, %g5
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	add	%g2, 0x8, %g2
	srl	%g3, 0, %g3
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7
	add	%g2, 0x40 - 0x8 - 0x8, %g2
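
	/* Note: the 0x8 + 0x8 + (0x40 - 0x8 - 0x8) adds above advance %g2 by
	 * exactly 0x40, i.e. one full 64-byte mondo entry past the one just
	 * consumed.
	 */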

	/* Update queue head pointer.  */
	lduw	[%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
	and	%g2, %g4, %g2
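	/* The per-cpu QMASK value is effectively (queue size in bytes - 1),
	 * and the queues are a power-of-two number of 64-byte entries, so
	 * the AND above wraps the new head offset back to zero when it runs
	 * off the end of the queue.
	 */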

	mov	INTRQ_CPU_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	jmpl	%g3, %g0
	 nop

sun4v_cpu_mondo_queue_empty:
	retry

sun4v_dev_mondo:
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_dev_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get DEV mondo queue base phys address into %g5.  */
	ldx	[%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5

	/* Load IVEC into %g3.  */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x40, %g2
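
	/* Only the first 64-bit word of the 64-byte dev mondo entry (the
	 * IVEC, or a VIRQ cookie) is consumed here; the add simply skips
	 * the remaining 56 bytes.  See the XXX note below about MSI data.
	 */
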
	/* XXX There can be a full 64-byte block of data here.
	 * XXX This is how we can get at MSI vector data.
	 * XXX Currently we do not capture this, but when we do we'll
	 * XXX need to add a 64-byte storage area in the struct ino_bucket
	 * XXX or the struct irq_desc.
	 */

	/* Update queue head pointer; this frees up some registers.  */
	lduw	[%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
	and	%g2, %g4, %g2

	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)

	/* For VIRQs, cookie is encoded as ~bucket_phys_addr.  */
	brlz,pt	%g3, 1f
	 xnor	%g3, %g0, %g4

	/* Get __pa(&ivector_table[IVEC]) into %g4.  */
	sethi	%hi(ivector_table_pa), %g4
	ldx	[%g4 + %lo(ivector_table_pa)], %g4
	sllx	%g3, 4, %g3
	add	%g4, %g3, %g4

1:	ldx	[%g1], %g2
	stxa	%g2, [%g4] ASI_PHYS_USE_EC
	stx	%g4, [%g1]
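
	/* The three instructions above push the bucket onto this cpu's irq
	 * work list: %g1 holds the PA of the list head, %g4 the PA of the
	 * ino_bucket (or decoded VIRQ cookie).  The old head is loaded into
	 * %g2, stored into the bucket's first word (its chain pointer), and
	 * the bucket's PA becomes the new list head.
	 */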

	/* Signal the interrupt by setting (1 << pil) in %softint.  */
	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint
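
	/* Writing %set_softint posts a software interrupt at PIL_DEVICE_IRQ;
	 * once we retry out of this trap, the generic device-interrupt
	 * handler runs and picks the bucket up off the irq work list.
	 */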

sun4v_dev_mondo_queue_empty:
	retry

sun4v_res_mondo:
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_RESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_res_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get RES mondo queue base phys address into %g5.  */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5

	/* Get RES kernel buffer base phys address into %g7.  */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7

	/* If the first word is non-zero, the queue is full.  */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_res_mondo_queue_full
	 nop

	lduw	[%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4

	/* Remember this entry's offset in %g1.  */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer.  */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer.  */
	and	%g2, %g4, %g2

	mov	INTRQ_RESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	/* Log the event.  */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_resum_error
	 mov	%l4, %o1
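
	/* sun4v_resum_error() runs in C with %o0 = pt_regs and %o1 = the
	 * offset (saved above in %g1, handed through as %l4) of this error
	 * report within the per-cpu resumable-error kernel buffer.
	 */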

	/* Return from trap.  */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_res_mondo_queue_empty:
	retry

sun4v_res_mondo_queue_full:
	/* The queue is full; consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_resum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo:
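	/* The non-resumable error path below mirrors the resumable one
	 * above, but uses the NONRESUM queue registers and kernel buffer
	 * and reports into sun4v_nonresum_error()/sun4v_nonresum_overflow().
	 */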
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get NONRESUM mondo queue base phys address into %g5.  */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5

	/* Get NONRESUM kernel buffer base phys address into %g7.  */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7

	/* If the first word is non-zero, the queue is full.  */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
	 nop

	lduw	[%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4

	/* Remember this entry's offset in %g1.  */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer.  */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer.  */
	and	%g2, %g4, %g2

	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	/* Log the event.  */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_nonresum_error
	 mov	%l4, %o1

	/* Return from trap.  */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo_queue_empty:
	retry

sun4v_nonres_mondo_queue_full:
	/* The queue is full; consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_nonresum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop