/* -*- linux-c -*-
 *
 *  iSeries Virtual I/O Message Path code
 *
 *  Authors: Dave Boutcher <boutcher@us.ibm.com>
 *           Ryan Arnold <ryanarn@us.ibm.com>
 *           Colin Devilbiss <devilbis@us.ibm.com>
 *
 * (C) Copyright 2000-2005 IBM Corporation
 *
 * This code is used by the iSeries virtual disk, cd,
 * tape, and console to communicate with OS/400 in another
 * partition.
 *
 * This program is free software;  you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/iseries/hv_types.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_lp_config.h>
#include <asm/iseries/mf.h>
#include <asm/iseries/vio.h>

/* Status of the path to each other partition in the system.
 * This is overkill, since we will only ever establish connections
 * to our hosting partition and the primary partition on the system.
 * But this allows for other support in the future.
 */
static struct viopathStatus {
        int isOpen;             /* Did we open the path?            */
        int isActive;           /* Do we have a mon msg outstanding */
        int users[VIO_MAX_SUBTYPES];
        HvLpInstanceId mSourceInst;
        HvLpInstanceId mTargetInst;
        int numberAllocated;
} viopathStatus[HVMAXARCHITECTEDLPS];

static DEFINE_SPINLOCK(statuslock);

/*
 * For each kind of event we allocate a buffer that is
 * guaranteed not to cross a page boundary
 */
static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256]
        __attribute__((__aligned__(4096)));
static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
static int event_buffer_initialised;

static void handleMonitorEvent(struct HvLpEvent *event);

/*
 * We use this structure to handle asynchronous responses.  The caller
 * blocks on the semaphore and the handler posts the semaphore.  However,
 * if system_state is not SYSTEM_RUNNING we cannot sleep, so wait_atomic
 * is spun on instead.
 */
struct alloc_parms {
        struct semaphore sem;
        int number;
        atomic_t wait_atomic;
        int used_wait_atomic;
};

/* Put a sequence number in each mon msg.  The value is not
 * important.  Start at something other than 0 just for
 * readability.  Wrapping is OK.
 */
static u8 viomonseq = 22;

/* Our hosting logical partition.  We get this at startup
 * time, and different modules access this variable directly.
 */
HvLpIndex viopath_hostLp = HvLpIndexInvalid;
EXPORT_SYMBOL(viopath_hostLp);
HvLpIndex viopath_ourLp = HvLpIndexInvalid;
EXPORT_SYMBOL(viopath_ourLp);

/* For each kind of incoming event we set a pointer to a
 * routine to call.
 */
static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];

#define VIOPATH_KERN_WARN       KERN_WARNING "viopath: "
#define VIOPATH_KERN_INFO       KERN_INFO "viopath: "

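/*
 * Show /proc/iSeries/config: ask the hosting partition for a page of
 * configuration text (waiting for handleConfig() to post the semaphore
 * when the ack arrives), then append the virtual LAN index map and the
 * system serial number from the device tree.
 */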
static int proc_viopath_show(struct seq_file *m, void *v)
{
        char *buf;
        u16 vlanMap;
        dma_addr_t handle;
        HvLpEvent_Rc hvrc;
        DECLARE_MUTEX_LOCKED(Semaphore);
        struct device_node *node;
        const char *sysid;

        buf = kzalloc(HW_PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return 0;

        handle = dma_map_single(iSeries_vio_dev, buf, HW_PAGE_SIZE,
                                DMA_FROM_DEVICE);

        hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
                        HvLpEvent_Type_VirtualIo,
                        viomajorsubtype_config | vioconfigget,
                        HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
                        viopath_sourceinst(viopath_hostLp),
                        viopath_targetinst(viopath_hostLp),
                        (u64)(unsigned long)&Semaphore, VIOVERSION << 16,
                        ((u64)handle) << 32, HW_PAGE_SIZE, 0, 0);

        if (hvrc != HvLpEvent_Rc_Good)
                printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);

        down(&Semaphore);

        vlanMap = HvLpConfig_getVirtualLanIndexMap();

        buf[HW_PAGE_SIZE-1] = '\0';
        seq_printf(m, "%s", buf);

        dma_unmap_single(iSeries_vio_dev, handle, HW_PAGE_SIZE,
                         DMA_FROM_DEVICE);
        kfree(buf);

        seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap);

        node = of_find_node_by_path("/");
        sysid = NULL;
        if (node != NULL)
                sysid = of_get_property(node, "system-id", NULL);

        if (sysid == NULL)
                seq_printf(m, "SRLNBR=<UNKNOWN>\n");
        else
                /* Skip "IBM," on front of serial number, see dt.c */
                seq_printf(m, "SRLNBR=%s\n", sysid + 4);

        of_node_put(node);

        return 0;
}

static int proc_viopath_open(struct inode *inode, struct file *file)
{
        return single_open(file, proc_viopath_show, NULL);
}

static const struct file_operations proc_viopath_operations = {
        .open           = proc_viopath_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init vio_proc_init(void)
{
        struct proc_dir_entry *e;

        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                return 0;

        e = create_proc_entry("iSeries/config", 0, NULL);
        if (e)
                e->proc_fops = &proc_viopath_operations;

        return 0;
}
__initcall(vio_proc_init);

/* See if a given LP is active.  Invalid LP indices may be passed in
 * and are simply reported as inactive.
 */
int viopath_isactive(HvLpIndex lp)
{
        if (lp == HvLpIndexInvalid)
                return 0;
        if (lp < HVMAXARCHITECTEDLPS)
                return viopathStatus[lp].isActive;
        else
                return 0;
}
EXPORT_SYMBOL(viopath_isactive);

/*
 * We cache the source and target instance ids for each
 * partition.
 */
HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
{
        return viopathStatus[lp].mSourceInst;
}
EXPORT_SYMBOL(viopath_sourceinst);

HvLpInstanceId viopath_targetinst(HvLpIndex lp)
{
        return viopathStatus[lp].mTargetInst;
}
EXPORT_SYMBOL(viopath_targetinst);

/*
 * Send a monitor message.  This is a message with the acknowledge
 * bit on that the other side will NOT explicitly acknowledge.  When
 * the other side goes down, the hypervisor will acknowledge any
 * outstanding messages, so we will know when the other side dies.
 */
static void sendMonMsg(HvLpIndex remoteLp)
{
        HvLpEvent_Rc hvrc;

        viopathStatus[remoteLp].mSourceInst =
                HvCallEvent_getSourceLpInstanceId(remoteLp,
                                HvLpEvent_Type_VirtualIo);
        viopathStatus[remoteLp].mTargetInst =
                HvCallEvent_getTargetLpInstanceId(remoteLp,
                                HvLpEvent_Type_VirtualIo);

        /*
         * Deliberately ignore the return code here.  If we call this
         * more than once, we don't care.
         */
        vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);

        hvrc = HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo,
                        viomajorsubtype_monitor, HvLpEvent_AckInd_DoAck,
                        HvLpEvent_AckType_DeferredAck,
                        viopathStatus[remoteLp].mSourceInst,
                        viopathStatus[remoteLp].mTargetInst,
                        viomonseq++, 0, 0, 0, 0, 0);

        if (hvrc == HvLpEvent_Rc_Good)
                viopathStatus[remoteLp].isActive = 1;
        else {
                printk(VIOPATH_KERN_WARN "could not connect to partition %d\n",
                                remoteLp);
                viopathStatus[remoteLp].isActive = 0;
        }
}

static void handleMonitorEvent(struct HvLpEvent *event)
{
        HvLpIndex remoteLp;
        int i;

        /*
         * This handler is _also_ called as part of the loop
         * at the end of this routine, so it must be able to
         * ignore NULL events...
         */
        if (!event)
                return;

        /*
         * First see if this is just a normal monitor message from the
         * other partition
         */
        if (hvlpevent_is_int(event)) {
                remoteLp = event->xSourceLp;
                if (!viopathStatus[remoteLp].isActive)
                        sendMonMsg(remoteLp);
                return;
        }

        /*
         * This path is for an acknowledgement; the other partition
         * died
         */
        remoteLp = event->xTargetLp;
        if ((event->xSourceInstanceId != viopathStatus[remoteLp].mSourceInst) ||
            (event->xTargetInstanceId != viopathStatus[remoteLp].mTargetInst)) {
                printk(VIOPATH_KERN_WARN "ignoring ack: mismatched instances\n");
                return;
        }

        printk(VIOPATH_KERN_WARN "partition %d ended\n", remoteLp);

        viopathStatus[remoteLp].isActive = 0;

        /*
         * For each active handler, pass them a NULL
         * message to indicate that the other partition
         * died
         */
        for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
                if (vio_handler[i] != NULL)
                        (*vio_handler[i])(NULL);
        }
}

int vio_setHandler(int subtype, vio_event_handler_t *beh)
{
        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return -EINVAL;
        if (vio_handler[subtype] != NULL)
                return -EBUSY;
        vio_handler[subtype] = beh;
        return 0;
}
EXPORT_SYMBOL(vio_setHandler);

int vio_clearHandler(int subtype)
{
        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return -EINVAL;
        if (vio_handler[subtype] == NULL)
                return -EAGAIN;
        vio_handler[subtype] = NULL;
        return 0;
}
EXPORT_SYMBOL(vio_clearHandler);

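/*
 * Typical use by a virtual device driver (an illustrative sketch only,
 * not code from this file; the subtype, handler name, and event count
 * are hypothetical):
 *
 *      rc = vio_setHandler(viomajorsubtype_cdio, my_cd_handler);
 *      if (rc == 0)
 *              rc = viopath_open(viopath_hostLp, viomajorsubtype_cdio, 2);
 *      ...
 *      viopath_close(viopath_hostLp, viomajorsubtype_cdio, 2);
 *      vio_clearHandler(viomajorsubtype_cdio);
 */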
static void handleConfig(struct HvLpEvent *event)
{
        if (!event)
                return;
        if (hvlpevent_is_int(event)) {
                printk(VIOPATH_KERN_WARN
                       "unexpected config request from partition %d\n",
                       event->xSourceLp);

                if (hvlpevent_need_ack(event)) {
                        event->xRc = HvLpEvent_Rc_InvalidSubtype;
                        HvCallEvent_ackLpEvent(event);
                }
                return;
        }

        up((struct semaphore *)event->xCorrelationToken);
}

/*
 * Initialization of the hosting partition
 */
void vio_set_hostlp(void)
{
        /*
         * If this has already been set then we DON'T want to either change
         * it or re-register the proc file system
         */
        if (viopath_hostLp != HvLpIndexInvalid)
                return;

        /*
         * Figure out our hosting partition.  This isn't allowed to change
         * while we're active
         */
        viopath_ourLp = HvLpConfig_getLpIndex();
        viopath_hostLp = HvLpConfig_getHostingLpIndex(viopath_ourLp);

        if (viopath_hostLp != HvLpIndexInvalid)
                vio_setHandler(viomajorsubtype_config, handleConfig);
}
EXPORT_SYMBOL(vio_set_hostlp);

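/*
 * Common demultiplexer for all incoming virtual I/O events: validate
 * the source/target instance ids against the cached path state, then
 * dispatch to the handler registered for the event's major subtype.
 */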
static void vio_handleEvent(struct HvLpEvent *event)
{
        HvLpIndex remoteLp;
        int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
                >> VIOMAJOR_SUBTYPE_SHIFT;

        if (hvlpevent_is_int(event)) {
                remoteLp = event->xSourceLp;
                /*
                 * The isActive is checked because if the hosting partition
                 * went down and came back up it would not be active but it
                 * would have different source and target instances, in which
                 * case we'd want to reset them.  This case really protects
                 * against an unauthorized active partition sending interrupts
                 * or acks to this linux partition.
                 */
                if (viopathStatus[remoteLp].isActive
                    && (event->xSourceInstanceId !=
                        viopathStatus[remoteLp].mTargetInst)) {
                        printk(VIOPATH_KERN_WARN
                               "message from invalid partition. "
                               "int msg rcvd, source inst (%d) doesn't match (%d)\n",
                               viopathStatus[remoteLp].mTargetInst,
                               event->xSourceInstanceId);
                        return;
                }

                if (viopathStatus[remoteLp].isActive
                    && (event->xTargetInstanceId !=
                        viopathStatus[remoteLp].mSourceInst)) {
                        printk(VIOPATH_KERN_WARN
                               "message from invalid partition. "
                               "int msg rcvd, target inst (%d) doesn't match (%d)\n",
                               viopathStatus[remoteLp].mSourceInst,
                               event->xTargetInstanceId);
                        return;
                }
        } else {
                remoteLp = event->xTargetLp;
                if (event->xSourceInstanceId !=
                    viopathStatus[remoteLp].mSourceInst) {
                        printk(VIOPATH_KERN_WARN
                               "message from invalid partition. "
                               "ack msg rcvd, source inst (%d) doesn't match (%d)\n",
                               viopathStatus[remoteLp].mSourceInst,
                               event->xSourceInstanceId);
                        return;
                }

                if (event->xTargetInstanceId !=
                    viopathStatus[remoteLp].mTargetInst) {
                        printk(VIOPATH_KERN_WARN
                               "message from invalid partition. "
                               "ack msg rcvd, target inst (%d) doesn't match (%d)\n",
                               viopathStatus[remoteLp].mTargetInst,
                               event->xTargetInstanceId);
                        return;
                }
        }

        if (vio_handler[subtype] == NULL) {
                printk(VIOPATH_KERN_WARN
                       "unexpected virtual io event subtype %d from partition %d\n",
                       event->xSubtype, remoteLp);
                /* No handler.  Ack if necessary */
                if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
                        event->xRc = HvLpEvent_Rc_InvalidSubtype;
                        HvCallEvent_ackLpEvent(event);
                }
                return;
        }

        /* This innocuous little line is where all the real work happens */
        (*vio_handler[subtype])(event);
}

458
459 static void viopath_donealloc(void *parm, int number)
460 {
461         struct alloc_parms *parmsp = parm;
462
463         parmsp->number = number;
464         if (parmsp->used_wait_atomic)
465                 atomic_set(&parmsp->wait_atomic, 0);
466         else
467                 up(&parmsp->sem);
468 }
469
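/*
 * Allocate numEvents hypervisor event slots on the path to remoteLp and
 * wait for the allocation to complete.  Early in boot we cannot sleep,
 * so we busy-wait on an atomic flag instead of a semaphore.
 */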
static int allocateEvents(HvLpIndex remoteLp, int numEvents)
{
        struct alloc_parms parms;

        if (system_state != SYSTEM_RUNNING) {
                parms.used_wait_atomic = 1;
                atomic_set(&parms.wait_atomic, 1);
        } else {
                parms.used_wait_atomic = 0;
                init_MUTEX_LOCKED(&parms.sem);
        }
        mf_allocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo,
                            250,        /* It would be nice to put a real number here! */
                            numEvents, &viopath_donealloc, &parms);
        if (system_state != SYSTEM_RUNNING) {
                while (atomic_read(&parms.wait_atomic))
                        mb();
        } else
                down(&parms.sem);
        return parms.number;
}

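/*
 * Open the virtual I/O path to remoteLp for the given subtype and
 * reserve numReq event slots on it.  The first opener also opens the
 * hypervisor event path, registers vio_handleEvent(), and starts the
 * monitor message exchange.
 */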
int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
{
        int i;
        unsigned long flags;
        int tempNumAllocated;

        if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
                return -EINVAL;

        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return -EINVAL;

        spin_lock_irqsave(&statuslock, flags);

        if (!event_buffer_initialised) {
                for (i = 0; i < VIO_MAX_SUBTYPES; i++)
                        atomic_set(&event_buffer_available[i], 1);
                event_buffer_initialised = 1;
        }

        viopathStatus[remoteLp].users[subtype]++;

        if (!viopathStatus[remoteLp].isOpen) {
                viopathStatus[remoteLp].isOpen = 1;
                HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualIo);

                /*
                 * Don't hold the spinlock during an operation that
                 * can sleep.
                 */
                spin_unlock_irqrestore(&statuslock, flags);
                tempNumAllocated = allocateEvents(remoteLp, 1);
                spin_lock_irqsave(&statuslock, flags);

                viopathStatus[remoteLp].numberAllocated += tempNumAllocated;

                if (viopathStatus[remoteLp].numberAllocated == 0) {
                        HvCallEvent_closeLpEventPath(remoteLp,
                                        HvLpEvent_Type_VirtualIo);

                        spin_unlock_irqrestore(&statuslock, flags);
                        return -ENOMEM;
                }

                viopathStatus[remoteLp].mSourceInst =
                        HvCallEvent_getSourceLpInstanceId(remoteLp,
                                        HvLpEvent_Type_VirtualIo);
                viopathStatus[remoteLp].mTargetInst =
                        HvCallEvent_getTargetLpInstanceId(remoteLp,
                                        HvLpEvent_Type_VirtualIo);
                HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
                                          &vio_handleEvent);
                sendMonMsg(remoteLp);
                printk(VIOPATH_KERN_INFO "opening connection to partition %d, "
                                "setting sinst %d, tinst %d\n",
                                remoteLp, viopathStatus[remoteLp].mSourceInst,
                                viopathStatus[remoteLp].mTargetInst);
        }

        spin_unlock_irqrestore(&statuslock, flags);
        tempNumAllocated = allocateEvents(remoteLp, numReq);
        spin_lock_irqsave(&statuslock, flags);
        viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
        spin_unlock_irqrestore(&statuslock, flags);

        return 0;
}
EXPORT_SYMBOL(viopath_open);

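/*
 * Drop one user of the given subtype on the path to remoteLp and
 * release numReq event slots.  When the last user is gone, close the
 * hypervisor event path and invalidate the event buffers.
 */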
int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
{
        unsigned long flags;
        int i;
        int numOpen;
        struct alloc_parms parms;

        if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
                return -EINVAL;

        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return -EINVAL;

        spin_lock_irqsave(&statuslock, flags);
        /*
         * If viopath_close somehow gets called before a
         * viopath_open it could decrement to -1, which is a
         * non-recoverable state, so we'll prevent this from
         * happening.
         */
        if (viopathStatus[remoteLp].users[subtype] > 0)
                viopathStatus[remoteLp].users[subtype]--;

        spin_unlock_irqrestore(&statuslock, flags);

        parms.used_wait_atomic = 0;
        init_MUTEX_LOCKED(&parms.sem);
        mf_deallocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo,
                              numReq, &viopath_donealloc, &parms);
        down(&parms.sem);

        spin_lock_irqsave(&statuslock, flags);
        for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++)
                numOpen += viopathStatus[remoteLp].users[i];

        if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
                printk(VIOPATH_KERN_INFO "closing connection to partition %d\n",
                                remoteLp);

                HvCallEvent_closeLpEventPath(remoteLp,
                                             HvLpEvent_Type_VirtualIo);
                viopathStatus[remoteLp].isOpen = 0;
                viopathStatus[remoteLp].isActive = 0;

                for (i = 0; i < VIO_MAX_SUBTYPES; i++)
                        atomic_set(&event_buffer_available[i], 0);
                event_buffer_initialised = 0;
        }
        spin_unlock_irqrestore(&statuslock, flags);
        return 0;
}
EXPORT_SYMBOL(viopath_close);

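/*
 * Hand out the per-subtype event buffer, or NULL if the subtype is
 * invalid or the buffer is already in use.  The atomic counter makes
 * this safe against concurrent claimants.
 */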
void *vio_get_event_buffer(int subtype)
{
        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return NULL;

        if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
                return &event_buffer[subtype * 256];
        else
                return NULL;
}
EXPORT_SYMBOL(vio_get_event_buffer);

void vio_free_event_buffer(int subtype, void *buffer)
{
        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
                printk(VIOPATH_KERN_WARN
                       "unexpected subtype %d freeing event buffer\n", subtype);
                return;
        }

        if (atomic_read(&event_buffer_available[subtype]) != 0) {
                printk(VIOPATH_KERN_WARN
                       "freeing unallocated event buffer, subtype %d\n",
                       subtype);
                return;
        }

        if (buffer != &event_buffer[subtype * 256]) {
                printk(VIOPATH_KERN_WARN
                       "freeing invalid event buffer, subtype %d\n", subtype);
                /* Don't mark a buffer available that isn't ours */
                return;
        }

        atomic_set(&event_buffer_available[subtype], 1);
}
EXPORT_SYMBOL(vio_free_event_buffer);

static const struct vio_error_entry vio_no_error =
    { 0, 0, "Non-VIO Error" };
static const struct vio_error_entry vio_unknown_error =
    { 0, EIO, "Unknown Error" };

static const struct vio_error_entry vio_default_errors[] = {
        {0x0001, EIO, "No Connection"},
        {0x0002, EIO, "No Receiver"},
        {0x0003, EIO, "No Buffer Available"},
        {0x0004, EBADRQC, "Invalid Message Type"},
        {0x0000, 0, NULL},
};

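/*
 * Translate a VIO return code into an errno and a description, checking
 * the caller's table (if any) before the default table.  Unrecognized
 * codes map to "Unknown Error" / EIO.
 */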
const struct vio_error_entry *vio_lookup_rc(
                const struct vio_error_entry *local_table, u16 rc)
{
        const struct vio_error_entry *cur;

        if (!rc)
                return &vio_no_error;
        if (local_table)
                for (cur = local_table; cur->rc; ++cur)
                        if (cur->rc == rc)
                                return cur;
        for (cur = vio_default_errors; cur->rc; ++cur)
                if (cur->rc == rc)
                        return cur;
        return &vio_unknown_error;
}
EXPORT_SYMBOL(vio_lookup_rc);