/*
 * Copyright (C) 2001 Mike Corrigan IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/paca.h>
#include <asm/iSeries/ItLpQueue.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvCallEvent.h>
#include <asm/iSeries/ItLpNaca.h>

/*
 * The LpQueue is used to pass event data from the hypervisor to
 * the partition. This is where I/O interrupt events are communicated.
 *
 * It is written to by the hypervisor so cannot end up in the BSS.
 */
struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
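
/* Per-CPU counts of events processed, indexed by event type; summed for /proc. */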
DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);

static char *event_types[HvLpEvent_Type_NumTypes] = {
	"Hypervisor",
	"Machine Facilities",
	"Session Manager",
	"SPD I/O",
	"Virtual Bus",
	"PCI I/O",
	"RIO I/O",
	"Virtual Lan",
	"Virtual I/O",
};

/* Array of LpEvent handler functions */
static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
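
/*
 * Return the next valid event on the queue and advance the cursor past it,
 * or return NULL if no event is currently pending.
 */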
static struct HvLpEvent *get_next_hvlpevent(void)
{
	struct HvLpEvent *event;
	event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;

	if (event->xFlags.xValid) {
		/* rmb() needed only for weakly consistent machines (regatta) */
		rmb();
		/* Set pointer to next potential event */
		hvlpevent_queue.xSlicCurEventPtr += ((event->xSizeMinus1 +
				LpEventAlign) / LpEventAlign) * LpEventAlign;

		/* Wrap to beginning if no room at end */
		if (hvlpevent_queue.xSlicCurEventPtr >
				hvlpevent_queue.xSlicLastValidEventPtr) {
			hvlpevent_queue.xSlicCurEventPtr =
				hvlpevent_queue.xSlicEventStackPtr;
		}
	} else {
		event = NULL;
	}

	return event;
}

static unsigned long spread_lpevents = NR_CPUS;
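
/*
 * Report whether an event or an overflow notification is waiting on the
 * queue. Processors at or beyond spread_lpevents never report pending, so
 * only the first spread_lpevents CPUs take part in event processing.
 */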
int hvlpevent_is_pending(void)
{
	struct HvLpEvent *next_event;

	if (smp_processor_id() >= spread_lpevents)
		return 0;

	next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;

	return next_event->xFlags.xValid |
		hvlpevent_queue.xPlicOverflowIntPending;
}

static void hvlpevent_clear_valid(struct HvLpEvent *event)
{
	/* Tell the Hypervisor that we're done with this event.
	 * Also clear bits within this event that might look like valid bits,
	 * i.e. on 64-byte boundaries.
	 */
	struct HvLpEvent *tmp;
	unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
						 LpEventAlign) - 1;
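
	/* Deliberate fall-through below: clear the valid bit in every
	 * 64-byte line the event spans, highest line first. */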
	switch (extra) {
	case 3:
		tmp = (struct HvLpEvent *)((char *)event + 3 * LpEventAlign);
		tmp->xFlags.xValid = 0;
	case 2:
		tmp = (struct HvLpEvent *)((char *)event + 2 * LpEventAlign);
		tmp->xFlags.xValid = 0;
	case 1:
		tmp = (struct HvLpEvent *)((char *)event + 1 * LpEventAlign);
		tmp->xFlags.xValid = 0;
	}

	mb();

	event->xFlags.xValid = 0;
}

void process_hvlpevents(struct pt_regs *regs)
{
	struct HvLpEvent *event;

	/* If we have recursed, just return */
	if (!spin_trylock(&hvlpevent_queue.lock))
		return;
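
	/* Drain the queue: handle each valid event in turn, then pick up
	 * any overflow events before releasing the lock. */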
	for (;;) {
		event = get_next_hvlpevent();
		if (event) {
			/* Call appropriate handler here, passing
			 * a pointer to the LpEvent. The handler
			 * must make a copy of the LpEvent if it
			 * needs it in a bottom half (perhaps for
			 * an ACK).
			 *
			 * Handlers are responsible for ACK processing.
			 *
			 * The Hypervisor guarantees that LpEvents will
			 * only be delivered with types that we have
			 * registered for, so no type check is necessary
			 * here!
			 */
			if (event->xType < HvLpEvent_Type_NumTypes)
				__get_cpu_var(hvlpevent_counts)[event->xType]++;
			if (event->xType < HvLpEvent_Type_NumTypes &&
					lpEventHandler[event->xType])
				lpEventHandler[event->xType](event, regs);
			else
				printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType);

			hvlpevent_clear_valid(event);
		} else if (hvlpevent_queue.xPlicOverflowIntPending)
			/*
			 * No more valid events. If overflow events are
			 * pending, process them.
			 */
			HvCallEvent_getOverflowLpEvents(hvlpevent_queue.xIndex);
		else
			break;
	}

	spin_unlock(&hvlpevent_queue.lock);
}
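
/* Parse the "spread_lpevents=" kernel boot option. */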
static int set_spread_lpevents(char *str)
{
	unsigned long val = simple_strtoul(str, NULL, 0);

	/*
	 * The parameter is the number of processors to share in processing
	 * lp events.
	 */
	if ((val > 0) && (val <= NR_CPUS)) {
		spread_lpevents = val;
		printk("lpevent processing spread over %ld processors\n", val);
	} else {
		printk("invalid spread_lpevents %ld\n", val);
	}

	return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);
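
/*
 * Called early in boot (while bootmem is still available) to allocate the
 * event stack and tell the hypervisor where it lives.
 */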
void setup_hvlpevent_queue(void)
{
	void *eventStack;

	/* Allocate a page for the Event Stack. */
	eventStack = alloc_bootmem_pages(LpEventStackSize);
	memset(eventStack, 0, LpEventStackSize);

	/* Invoke the hypervisor to initialize the event stack */
	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);

	hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack;
	hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack;
	hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack +
					(LpEventStackSize - LpEventMaxSize);
	hvlpevent_queue.xIndex = 0;
}

/* Register a handler for an LpEvent type */
int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
{
	if (eventType < HvLpEvent_Type_NumTypes) {
		lpEventHandler[eventType] = handler;
		return 0;
	}
	return 1;
}
EXPORT_SYMBOL(HvLpEvent_registerHandler);
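
/*
 * Remove a handler, but only while no paths of its type remain open.
 * May sleep, so must be called from process context.
 */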
int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
{
	might_sleep();

	if (eventType < HvLpEvent_Type_NumTypes) {
		if (!lpEventHandlerPaths[eventType]) {
			lpEventHandler[eventType] = NULL;
			/*
			 * We now sleep until all other CPUs have scheduled.
			 * This ensures that the deletion is seen by all
			 * other CPUs, and that the deleted handler isn't
			 * still running on another CPU when we return.
			 */
			synchronize_rcu();
			return 0;
		}
	}
	return 1;
}
EXPORT_SYMBOL(HvLpEvent_unregisterHandler);

/*
 * lpIndex is the partition index of the target partition.
 * It is needed only for VirtualIo, VirtualLan and SessionMgr. Zero
 * indicates to use our partition index - for the other types.
 */
int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
	if ((eventType < HvLpEvent_Type_NumTypes) &&
			lpEventHandler[eventType]) {
		if (lpIndex == 0)
			lpIndex = itLpNaca.xLpIndex;
		HvCallEvent_openLpEventPath(lpIndex, eventType);
		++lpEventHandlerPaths[eventType];
		return 0;
	}
	return 1;
}
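
/* Close a path opened with HvLpEvent_openPath; lpIndex follows the same rules. */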
int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
	if ((eventType < HvLpEvent_Type_NumTypes) &&
			lpEventHandler[eventType] &&
			lpEventHandlerPaths[eventType]) {
		if (lpIndex == 0)
			lpIndex = itLpNaca.xLpIndex;
		HvCallEvent_closeLpEventPath(lpIndex, eventType);
		--lpEventHandlerPaths[eventType];
		return 0;
	}
	return 1;
}
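
/* Emit event counts (total, per type, per CPU) for /proc/iSeries/lpevents. */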
static int proc_lpevents_show(struct seq_file *m, void *v)
{
	int cpu, i;
	unsigned long sum;
	static unsigned long cpu_totals[NR_CPUS];

	/* FIXME: do we care that there's no locking here? */
	sum = 0;
	for_each_online_cpu(cpu) {
		cpu_totals[cpu] = 0;
		for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
			cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
		}
		sum += cpu_totals[cpu];
	}

	seq_printf(m, "LpEventQueue 0\n");
	seq_printf(m, "  events processed:\t%lu\n", sum);

	for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
		sum = 0;
		for_each_online_cpu(cpu) {
			sum += per_cpu(hvlpevent_counts, cpu)[i];
		}

		seq_printf(m, "    %-20s %10lu\n", event_types[i], sum);
	}

	seq_printf(m, "\n  events processed by processor:\n");

	for_each_online_cpu(cpu) {
		seq_printf(m, "    CPU%02d  %10lu\n", cpu, cpu_totals[cpu]);
	}

	return 0;
}

static int proc_lpevents_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_lpevents_show, NULL);
}

static struct file_operations proc_lpevents_operations = {
	.open		= proc_lpevents_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
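
/* Create the /proc/iSeries/lpevents entry at boot. */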
static int __init proc_lpevents_init(void)
{
	struct proc_dir_entry *e;

	e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
	if (e)
		e->proc_fops = &proc_lpevents_operations;

	return 0;
}
__initcall(proc_lpevents_init);