1 /*
2   * mf.c
3   * Copyright (C) 2001 Troy D. Armstrong  IBM Corporation
4   * Copyright (C) 2004-2005 Stephen Rothwell  IBM Corporation
5   *
6   * This module exists as an interface between a Linux secondary partition
7   * running on an iSeries and the primary partition's Virtual Service
8   * Processor (VSP) object.  The VSP has final authority over powering on/off
9   * all partitions in the iSeries.  It also provides miscellaneous low-level
10   * machine facility type operations.
11   *
12   *
13   * This program is free software; you can redistribute it and/or modify
14   * it under the terms of the GNU General Public License as published by
15   * the Free Software Foundation; either version 2 of the License, or
16   * (at your option) any later version.
17   *
18   * This program is distributed in the hope that it will be useful,
19   * but WITHOUT ANY WARRANTY; without even the implied warranty of
20   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21   * GNU General Public License for more details.
22   *
23   * You should have received a copy of the GNU General Public License
24   * along with this program; if not, write to the Free Software
25   * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
26   */
27
28 #include <linux/types.h>
29 #include <linux/errno.h>
30 #include <linux/kernel.h>
31 #include <linux/init.h>
32 #include <linux/completion.h>
33 #include <linux/delay.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/bcd.h>
36
37 #include <asm/time.h>
38 #include <asm/uaccess.h>
39 #include <asm/paca.h>
40 #include <asm/iSeries/vio.h>
41 #include <asm/iSeries/mf.h>
42 #include <asm/iSeries/HvLpConfig.h>
43 #include <asm/iSeries/ItLpQueue.h>
44
45 /*
46  * This is the structure layout for the Machine Facilities LPAR event
47  * flows.
48  */
49 struct vsp_cmd_data {
50         u64 token;
51         u16 cmd;
52         HvLpIndex lp_index;
53         u8 result_code;
54         u32 reserved;
55         union {
56                 u64 state;      /* GetStateOut */
57                 u64 ipl_type;   /* GetIplTypeOut, Function02SelectIplTypeIn */
58                 u64 ipl_mode;   /* GetIplModeOut, Function02SelectIplModeIn */
59                 u64 page[4];    /* GetSrcHistoryIn */
60                 u64 flag;       /* GetAutoIplWhenPrimaryIplsOut,
61                                    SetAutoIplWhenPrimaryIplsIn,
62                                    WhiteButtonPowerOffIn,
63                                    Function08FastPowerOffIn,
64                                    IsSpcnRackPowerIncompleteOut */
65                 struct {
66                         u64 token;
67                         u64 address_type;
68                         u64 side;
69                         u32 length;
70                         u32 offset;
71                 } kern;         /* SetKernelImageIn, GetKernelImageIn,
72                                    SetKernelCmdLineIn, GetKernelCmdLineIn */
73                 u32 length_out; /* GetKernelImageOut, GetKernelCmdLineOut */
74                 u8 reserved[80];
75         } sub_data;
76 };
77
78 struct vsp_rsp_data {
79         struct completion com;
80         struct vsp_cmd_data *response;
81 };
82
83 struct alloc_data {
84         u16 size;
85         u16 type;
86         u32 count;
87         u16 reserved1;
88         u8 reserved2;
89         HvLpIndex target_lp;
90 };
91
92 struct ce_msg_data;
93
94 typedef void (*ce_msg_comp_hdlr)(void *token, struct ce_msg_data *vsp_cmd_rsp);
95
96 struct ce_msg_comp_data {
97         ce_msg_comp_hdlr handler;
98         void *token;
99 };
100
101 struct ce_msg_data {
102         u8 ce_msg[12];
103         char reserved[4];
104         struct ce_msg_comp_data *completion;
105 };
106
107 struct io_mf_lp_event {
108         struct HvLpEvent hp_lp_event;
109         u16 subtype_result_code;
110         u16 reserved1;
111         u32 reserved2;
112         union {
113                 struct alloc_data alloc;
114                 struct ce_msg_data ce_msg;
115                 struct vsp_cmd_data vsp_cmd;
116         } data;
117 };
118
119 #define subtype_data(a, b, c, d)        \
120                 (((a) << 24) + ((b) << 16) + ((c) << 8) + (d))
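/*
 * For example, subtype_data('M', 'F', 'C', 'E') packs the ASCII codes
 * 0x4D 0x46 0x43 0x45 into the single word 0x4d464345 stored in the
 * event's xSubtypeData field.
 */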
121
122 /*
123  * All outgoing event traffic is kept on a FIFO queue.  The head
124  * pointer points to the one that is outstanding, and all new
125  * requests are added at the tail.  Also, we keep a certain number of
126  * preallocated pending events so that we can operate very early in
127  * the boot-up sequence (before kmalloc is ready).
128  */
129 struct pending_event {
130         struct pending_event *next;
131         struct io_mf_lp_event event;
132         MFCompleteHandler hdlr;
133         char dma_data[72];
134         unsigned dma_data_length;
135         unsigned remote_address;
136 };
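/*
 * pending_event_head/pending_event_tail form the FIFO of outstanding
 * requests, pending_event_avail is the free list of reusable entries,
 * and pending_event_prealloc is the pool used before kmalloc works.
 */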
137 static spinlock_t pending_event_spinlock;
138 static struct pending_event *pending_event_head;
139 static struct pending_event *pending_event_tail;
140 static struct pending_event *pending_event_avail;
141 static struct pending_event pending_event_prealloc[16];
142
143 /*
144  * Put a pending event onto the available queue, so it can get reused.
145  * Attention! You must hold the pending_event_spinlock before calling!
146  */
147 static void free_pending_event(struct pending_event *ev)
148 {
149         if (ev != NULL) {
150                 ev->next = pending_event_avail;
151                 pending_event_avail = ev;
152         }
153 }
154
155 /*
156  * Enqueue the outbound event onto the queue.  If the queue was
157  * empty to begin with, we must also issue it via the Hypervisor
158  * interface.  There is a section of code below that touches
159  * the head pointer without holding the pending_event_spinlock.
160  * This is OK, because we know that nobody else will be modifying
161  * the head pointer while we do this.
162  */
163 static int signal_event(struct pending_event *ev)
164 {
165         int rc = 0;
166         unsigned long flags;
167         int go = 1;
168         struct pending_event *ev1;
169         HvLpEvent_Rc hv_rc;
170
171         /* enqueue the event */
172         if (ev != NULL) {
173                 ev->next = NULL;
174                 spin_lock_irqsave(&pending_event_spinlock, flags);
175                 if (pending_event_head == NULL)
176                         pending_event_head = ev;
177                 else {
178                         go = 0;
179                         pending_event_tail->next = ev;
180                 }
181                 pending_event_tail = ev;
182                 spin_unlock_irqrestore(&pending_event_spinlock, flags);
183         }
184
185         /* send the event */
186         while (go) {
187                 go = 0;
188
189                 /* any DMA data to send beforehand? */
190                 if (pending_event_head->dma_data_length > 0)
191                         HvCallEvent_dmaToSp(pending_event_head->dma_data,
192                                         pending_event_head->remote_address,
193                                         pending_event_head->dma_data_length,
194                                         HvLpDma_Direction_LocalToRemote);
195
196                 hv_rc = HvCallEvent_signalLpEvent(
197                                 &pending_event_head->event.hp_lp_event);
198                 if (hv_rc != HvLpEvent_Rc_Good) {
199                         printk(KERN_ERR "mf.c: HvCallEvent_signalLpEvent() "
200                                         "failed with %d\n", (int)hv_rc);
201
202                         spin_lock_irqsave(&pending_event_spinlock, flags);
203                         ev1 = pending_event_head;
204                         pending_event_head = pending_event_head->next;
205                         if (pending_event_head != NULL)
206                                 go = 1;
207                         spin_unlock_irqrestore(&pending_event_spinlock, flags);
208
209                         if (ev1 == ev)
210                                 rc = -EIO;
211                         else if (ev1->hdlr != NULL)
212                                 (*ev1->hdlr)((void *)ev1->event.hp_lp_event.xCorrelationToken, -EIO);
213
214                         spin_lock_irqsave(&pending_event_spinlock, flags);
215                         free_pending_event(ev1);
216                         spin_unlock_irqrestore(&pending_event_spinlock, flags);
217                 }
218         }
219
220         return rc;
221 }
222
223 /*
224  * Allocate a new pending_event structure, and initialize it.
225  */
226 static struct pending_event *new_pending_event(void)
227 {
228         struct pending_event *ev = NULL;
229         HvLpIndex primary_lp = HvLpConfig_getPrimaryLpIndex();
230         unsigned long flags;
231         struct HvLpEvent *hev;
232
233         spin_lock_irqsave(&pending_event_spinlock, flags);
234         if (pending_event_avail != NULL) {
235                 ev = pending_event_avail;
236                 pending_event_avail = pending_event_avail->next;
237         }
238         spin_unlock_irqrestore(&pending_event_spinlock, flags);
239         if (ev == NULL) {
240                 ev = kmalloc(sizeof(struct pending_event), GFP_ATOMIC);
241                 if (ev == NULL) {
242                         printk(KERN_ERR "mf.c: unable to kmalloc %ld bytes\n",
243                                         sizeof(struct pending_event));
244                         return NULL;
245                 }
246         }
247         memset(ev, 0, sizeof(struct pending_event));
248         hev = &ev->event.hp_lp_event;
249         hev->xFlags.xValid = 1;
250         hev->xFlags.xAckType = HvLpEvent_AckType_ImmediateAck;
251         hev->xFlags.xAckInd = HvLpEvent_AckInd_DoAck;
252         hev->xFlags.xFunction = HvLpEvent_Function_Int;
253         hev->xType = HvLpEvent_Type_MachineFac;
254         hev->xSourceLp = HvLpConfig_getLpIndex();
255         hev->xTargetLp = primary_lp;
256         hev->xSizeMinus1 = sizeof(ev->event) - 1;
257         hev->xRc = HvLpEvent_Rc_Good;
258         hev->xSourceInstanceId = HvCallEvent_getSourceLpInstanceId(primary_lp,
259                         HvLpEvent_Type_MachineFac);
260         hev->xTargetInstanceId = HvCallEvent_getTargetLpInstanceId(primary_lp,
261                         HvLpEvent_Type_MachineFac);
262
263         return ev;
264 }
265
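/*
 * Send a VSP command ('MFVI', subtype 6) to the primary partition and,
 * once it has been queued successfully, sleep until handle_ack() copies
 * the response back into *vsp_cmd and completes us.
 */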
266 static int signal_vsp_instruction(struct vsp_cmd_data *vsp_cmd)
267 {
268         struct pending_event *ev = new_pending_event();
269         int rc;
270         struct vsp_rsp_data response;
271
272         if (ev == NULL)
273                 return -ENOMEM;
274
275         init_completion(&response.com);
276         response.response = vsp_cmd;
277         ev->event.hp_lp_event.xSubtype = 6;
278         ev->event.hp_lp_event.x.xSubtypeData =
279                 subtype_data('M', 'F',  'V',  'I');
280         ev->event.data.vsp_cmd.token = (u64)&response;
281         ev->event.data.vsp_cmd.cmd = vsp_cmd->cmd;
282         ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
283         ev->event.data.vsp_cmd.result_code = 0xFF;
284         ev->event.data.vsp_cmd.reserved = 0;
285         memcpy(&(ev->event.data.vsp_cmd.sub_data),
286                         &(vsp_cmd->sub_data), sizeof(vsp_cmd->sub_data));
287         mb();
288
289         rc = signal_event(ev);
290         if (rc == 0)
291                 wait_for_completion(&response.com);
292         return rc;
293 }
294
295
296 /*
297  * Send a 12-byte CE message to the primary partition VSP object
298  */
299 static int signal_ce_msg(char *ce_msg, struct ce_msg_comp_data *completion)
300 {
301         struct pending_event *ev = new_pending_event();
302
303         if (ev == NULL)
304                 return -ENOMEM;
305
306         ev->event.hp_lp_event.xSubtype = 0;
307         ev->event.hp_lp_event.x.xSubtypeData =
308                 subtype_data('M',  'F',  'C',  'E');
309         memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
310         ev->event.data.ce_msg.completion = completion;
311         return signal_event(ev);
312 }
313
314 /*
315  * Send a 12-byte CE message (with no data) to the primary partition VSP object
316  */
317 static int signal_ce_msg_simple(u8 ce_op, struct ce_msg_comp_data *completion)
318 {
319         u8 ce_msg[12];
320
321         memset(ce_msg, 0, sizeof(ce_msg));
322         ce_msg[3] = ce_op;
323         return signal_ce_msg(ce_msg, completion);
324 }
325
326 /*
327  * Send a 12-byte CE message and DMA data to the primary partition VSP object
328  */
329 static int dma_and_signal_ce_msg(char *ce_msg,
330                 struct ce_msg_comp_data *completion, void *dma_data,
331                 unsigned dma_data_length, unsigned remote_address)
332 {
333         struct pending_event *ev = new_pending_event();
334
335         if (ev == NULL)
336                 return -ENOMEM;
337
338         ev->event.hp_lp_event.xSubtype = 0;
339         ev->event.hp_lp_event.x.xSubtypeData =
340                 subtype_data('M', 'F', 'C', 'E');
341         memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
342         ev->event.data.ce_msg.completion = completion;
343         memcpy(ev->dma_data, dma_data, dma_data_length);
344         ev->dma_data_length = dma_data_length;
345         ev->remote_address = remote_address;
346         return signal_event(ev);
347 }
348
349 /*
350  * Initiate a graceful shutdown of Linux.  We simply try to send
351  * the init process a SIGINT signal.  If that fails for some
352  * reason, we fall back to forcing the partition off in a
353  * not-so-nice manner.
354  */
355 static int shutdown(void)
356 {
357         int rc = kill_proc(1, SIGINT, 1);
358
359         if (rc) {
360                 printk(KERN_ALERT "mf.c: SIGINT to init failed (%d), "
361                                 "hard shutdown commencing\n", rc);
362                 mf_power_off();
363         } else
364                 printk(KERN_INFO "mf.c: init has been successfully notified "
365                                 "to proceed with shutdown\n");
366         return rc;
367 }
368
369 /*
370  * The primary partition VSP object is sending us a new
371  * event flow.  Handle it...
372  */
373 static void handle_int(struct io_mf_lp_event *event)
374 {
375         struct ce_msg_data *ce_msg_data;
376         struct ce_msg_data *pce_msg_data;
377         unsigned long flags;
378         struct pending_event *pev;
379
380         /* ack the interrupt */
381         event->hp_lp_event.xRc = HvLpEvent_Rc_Good;
382         HvCallEvent_ackLpEvent(&event->hp_lp_event);
383
384         /* process interrupt */
385         switch (event->hp_lp_event.xSubtype) {
386         case 0: /* CE message */
387                 ce_msg_data = &event->data.ce_msg;
388                 switch (ce_msg_data->ce_msg[3]) {
389                 case 0x5B:      /* power control notification */
390                         if ((ce_msg_data->ce_msg[5] & 0x20) != 0) {
391                                 printk(KERN_INFO "mf.c: Commencing partition shutdown\n");
392                                 if (shutdown() == 0)
393                                         signal_ce_msg_simple(0xDB, NULL);
394                         }
395                         break;
396                 case 0xC0:      /* get time */
397                         spin_lock_irqsave(&pending_event_spinlock, flags);
398                         pev = pending_event_head;
399                         if (pev != NULL)
400                                 pending_event_head = pending_event_head->next;
401                         spin_unlock_irqrestore(&pending_event_spinlock, flags);
402                         if (pev == NULL)
403                                 break;
404                         pce_msg_data = &pev->event.data.ce_msg;
405                         if (pce_msg_data->ce_msg[3] != 0x40)
406                                 break;
407                         if (pce_msg_data->completion != NULL) {
408                                 ce_msg_comp_hdlr handler =
409                                         pce_msg_data->completion->handler;
410                                 void *token = pce_msg_data->completion->token;
411
412                                 if (handler != NULL)
413                                         (*handler)(token, ce_msg_data);
414                         }
415                         spin_lock_irqsave(&pending_event_spinlock, flags);
416                         free_pending_event(pev);
417                         spin_unlock_irqrestore(&pending_event_spinlock, flags);
418                         /* send next waiting event */
419                         if (pending_event_head != NULL)
420                                 signal_event(NULL);
421                         break;
422                 }
423                 break;
424         case 1: /* IT sys shutdown */
425                 printk(KERN_INFO "mf.c: Commencing system shutdown\n");
426                 shutdown();
427                 break;
428         }
429 }
430
431 /*
432  * The primary partition VSP object is acknowledging the receipt
433  * of a flow we sent to it.  If there are other flows queued
434  * up, we must send the next one now...
435  */
436 static void handle_ack(struct io_mf_lp_event *event)
437 {
438         unsigned long flags;
439         struct pending_event *two = NULL;
440         unsigned long free_it = 0;
441         struct ce_msg_data *ce_msg_data;
442         struct ce_msg_data *pce_msg_data;
443         struct vsp_rsp_data *rsp;
444
445         /* handle current event */
446         if (pending_event_head == NULL) {
447                 printk(KERN_ERR "mf.c: stack empty for receiving ack\n");
448                 return;
449         }
450
451         switch (event->hp_lp_event.xSubtype) {
452         case 0:     /* CE msg */
453                 ce_msg_data = &event->data.ce_msg;
454                 if (ce_msg_data->ce_msg[3] != 0x40) {
455                         free_it = 1;
456                         break;
457                 }
458                 if (ce_msg_data->ce_msg[2] == 0)
459                         break;
460                 free_it = 1;
461                 pce_msg_data = &pending_event_head->event.data.ce_msg;
462                 if (pce_msg_data->completion != NULL) {
463                         ce_msg_comp_hdlr handler =
464                                 pce_msg_data->completion->handler;
465                         void *token = pce_msg_data->completion->token;
466
467                         if (handler != NULL)
468                                 (*handler)(token, ce_msg_data);
469                 }
470                 break;
471         case 4: /* allocate */
472         case 5: /* deallocate */
473                 if (pending_event_head->hdlr != NULL)
474                         (*pending_event_head->hdlr)((void *)event->hp_lp_event.xCorrelationToken, event->data.alloc.count);
475                 free_it = 1;
476                 break;
477         case 6:
478                 free_it = 1;
479                 rsp = (struct vsp_rsp_data *)event->data.vsp_cmd.token;
480                 if (rsp == NULL) {
481                         printk(KERN_ERR "mf.c: no rsp\n");
482                         break;
483                 }
484                 if (rsp->response != NULL)
485                         memcpy(rsp->response, &event->data.vsp_cmd,
486                                         sizeof(event->data.vsp_cmd));
487                 complete(&rsp->com);
488                 break;
489         }
490
491         /* remove from queue */
492         spin_lock_irqsave(&pending_event_spinlock, flags);
493         if ((pending_event_head != NULL) && (free_it == 1)) {
494                 struct pending_event *oldHead = pending_event_head;
495
496                 pending_event_head = pending_event_head->next;
497                 two = pending_event_head;
498                 free_pending_event(oldHead);
499         }
500         spin_unlock_irqrestore(&pending_event_spinlock, flags);
501
502         /* send next waiting event */
503         if (two != NULL)
504                 signal_event(NULL);
505 }
506
507 /*
508  * This is the generic event handler we are registering with
509  * the Hypervisor.  Ensure the flow is for us, and then
510  * parse it enough to know whether it is an interrupt or an
511  * acknowledgement.
512  */
513 static void hv_handler(struct HvLpEvent *event, struct pt_regs *regs)
514 {
515         if ((event != NULL) && (event->xType == HvLpEvent_Type_MachineFac)) {
516                 switch(event->xFlags.xFunction) {
517                 case HvLpEvent_Function_Ack:
518                         handle_ack((struct io_mf_lp_event *)event);
519                         break;
520                 case HvLpEvent_Function_Int:
521                         handle_int((struct io_mf_lp_event *)event);
522                         break;
523                 default:
524                         printk(KERN_ERR "mf.c: non ack/int event received\n");
525                         break;
526                 }
527         } else
528                 printk(KERN_ERR "mf.c: alien event received\n");
529 }
530
531 /*
532  * Global kernel interface to allocate and seed events into the
533  * Hypervisor.
534  */
535 void mf_allocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type,
536                 unsigned size, unsigned count, MFCompleteHandler hdlr,
537                 void *user_token)
538 {
539         struct pending_event *ev = new_pending_event();
540         int rc;
541
542         if (ev == NULL) {
543                 rc = -ENOMEM;
544         } else {
545                 ev->event.hp_lp_event.xSubtype = 4;
546                 ev->event.hp_lp_event.xCorrelationToken = (u64)user_token;
547                 ev->event.hp_lp_event.x.xSubtypeData =
548                         subtype_data('M', 'F', 'M', 'A');
549                 ev->event.data.alloc.target_lp = target_lp;
550                 ev->event.data.alloc.type = type;
551                 ev->event.data.alloc.size = size;
552                 ev->event.data.alloc.count = count;
553                 ev->hdlr = hdlr;
554                 rc = signal_event(ev);
555         }
556         if ((rc != 0) && (hdlr != NULL))
557                 (*hdlr)(user_token, rc);
558 }
559 EXPORT_SYMBOL(mf_allocate_lp_events);
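/*
 * A hypothetical caller (illustration only, not taken from an existing
 * driver): asking for four 256-byte HvLpEvent_Type_VirtualIo events to
 * be allocated for the primary partition might look like
 *
 *      mf_allocate_lp_events(HvLpConfig_getPrimaryLpIndex(),
 *                      HvLpEvent_Type_VirtualIo, 256, 4,
 *                      my_done_handler, my_token);
 *
 * where my_done_handler and my_token are the caller's own completion
 * handler and cookie.
 */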
560
561 /*
562  * Global kernel interface to unseed and deallocate events already in
563  * Hypervisor.
564  */
565 void mf_deallocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type,
566                 unsigned count, MFCompleteHandler hdlr, void *user_token)
567 {
568         struct pending_event *ev = new_pending_event();
569         int rc;
570
571         if (ev == NULL)
572                 rc = -ENOMEM;
573         else {
574                 ev->event.hp_lp_event.xSubtype = 5;
575                 ev->event.hp_lp_event.xCorrelationToken = (u64)user_token;
576                 ev->event.hp_lp_event.x.xSubtypeData =
577                         subtype_data('M', 'F', 'M', 'D');
578                 ev->event.data.alloc.target_lp = target_lp;
579                 ev->event.data.alloc.type = type;
580                 ev->event.data.alloc.count = count;
581                 ev->hdlr = hdlr;
582                 rc = signal_event(ev);
583         }
584         if ((rc != 0) && (hdlr != NULL))
585                 (*hdlr)(user_token, rc);
586 }
587 EXPORT_SYMBOL(mf_deallocate_lp_events);
588
589 /*
590  * Global kernel interface to tell the VSP object in the primary
591  * partition to power this partition off.
592  */
593 void mf_power_off(void)
594 {
595         printk(KERN_INFO "mf.c: Down it goes...\n");
596         signal_ce_msg_simple(0x4d, NULL);
597         for (;;)
598                 ;
599 }
600
601 /*
602  * Global kernel interface to tell the VSP object in the primary
603  * partition to reboot this partition.
604  */
605 void mf_reboot(void)
606 {
607         printk(KERN_INFO "mf.c: Preparing to bounce...\n");
608         signal_ce_msg_simple(0x4e, NULL);
609         for (;;)
610                 ;
611 }
612
613 /*
614  * Display a single word SRC onto the VSP control panel.
615  */
616 void mf_display_src(u32 word)
617 {
618         u8 ce[12];
619
620         memset(ce, 0, sizeof(ce));
621         ce[3] = 0x4a;
622         ce[7] = 0x01;
623         ce[8] = word >> 24;
624         ce[9] = word >> 16;
625         ce[10] = word >> 8;
626         ce[11] = word;
627         signal_ce_msg(ce, NULL);
628 }
629
630 /*
631  * Display a single word SRC of the form "PROGXXXX" on the VSP control panel.
632  */
633 void mf_display_progress(u16 value)
634 {
635         u8 ce[12];
636         u8 src[72];
637
638         memcpy(ce, "\x00\x00\x04\x4A\x00\x00\x00\x48\x00\x00\x00\x00", 12);
639         memcpy(src, "\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00"
640                 "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
641                 "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
642                 "\x00\x00\x00\x00PROGxxxx                        ",
643                 72);
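        /*
         * Store the raw value at offsets 6-7 and patch its hex digits
         * over the "xxxx" placeholder in "PROGxxxx".
         */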
644         src[6] = value >> 8;
645         src[7] = value & 255;
646         src[44] = "0123456789ABCDEF"[(value >> 12) & 15];
647         src[45] = "0123456789ABCDEF"[(value >> 8) & 15];
648         src[46] = "0123456789ABCDEF"[(value >> 4) & 15];
649         src[47] = "0123456789ABCDEF"[value & 15];
650         dma_and_signal_ce_msg(ce, NULL, src, sizeof(src), 9 * 64 * 1024);
651 }
652
653 /*
654  * Clear the VSP control panel.  Used to "erase" an SRC that was
655  * previously displayed.
656  */
657 void mf_clear_src(void)
658 {
659         signal_ce_msg_simple(0x4b, NULL);
660 }
661
662 /*
663  * Initialize the pending event queue and register the MF event handler.
664  */
665 void mf_init(void)
666 {
667         int i;
668
669         /* initialize */
670         spin_lock_init(&pending_event_spinlock);
671         for (i = 0;
672              i < sizeof(pending_event_prealloc) / sizeof(*pending_event_prealloc);
673              ++i)
674                 free_pending_event(&pending_event_prealloc[i]);
675         HvLpEvent_registerHandler(HvLpEvent_Type_MachineFac, &hv_handler);
676
677         /* virtual continue ack */
678         signal_ce_msg_simple(0x57, NULL);
679
680         /* initialization complete */
681         printk(KERN_NOTICE "mf.c: iSeries Linux LPAR Machine Facilities "
682                         "initialized\n");
683 }
684
685 struct rtc_time_data {
686         struct completion com;
687         struct ce_msg_data ce_msg;
688         int rc;
689 };
690
691 static void get_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
692 {
693         struct rtc_time_data *rtc = token;
694
695         memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg));
696         rtc->rc = 0;
697         complete(&rtc->com);
698 }
699
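/*
 * Convert the BCD date/time fields of a CE time response into a
 * struct rtc_time.  On error we fall back to a fixed date; if the
 * service processor says its TOD clock is not set, we seed it with a
 * fixed date via mf_set_rtc() first.
 */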
700 static int rtc_set_tm(int rc, u8 *ce_msg, struct rtc_time *tm)
701 {
702         tm->tm_wday = 0;
703         tm->tm_yday = 0;
704         tm->tm_isdst = 0;
705         if (rc) {
706                 tm->tm_sec = 0;
707                 tm->tm_min = 0;
708                 tm->tm_hour = 0;
709                 tm->tm_mday = 15;
710                 tm->tm_mon = 5;
711                 tm->tm_year = 52;
712                 return rc;
713         }
714
715         if ((ce_msg[2] == 0xa9) ||
716             (ce_msg[2] == 0xaf)) {
717                 /* TOD clock is not set */
718                 tm->tm_sec = 1;
719                 tm->tm_min = 1;
720                 tm->tm_hour = 1;
721                 tm->tm_mday = 10;
722                 tm->tm_mon = 8;
723                 tm->tm_year = 71;
724                 mf_set_rtc(tm);
725         }
726         {
727                 u8 year = ce_msg[5];
728                 u8 sec = ce_msg[6];
729                 u8 min = ce_msg[7];
730                 u8 hour = ce_msg[8];
731                 u8 day = ce_msg[10];
732                 u8 mon = ce_msg[11];
733
734                 BCD_TO_BIN(sec);
735                 BCD_TO_BIN(min);
736                 BCD_TO_BIN(hour);
737                 BCD_TO_BIN(day);
738                 BCD_TO_BIN(mon);
739                 BCD_TO_BIN(year);
740
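                /* two-digit year: 00-69 means 2000-2069, 70-99 means 1970-1999 */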
741                 if (year <= 69)
742                         year += 100;
743
744                 tm->tm_sec = sec;
745                 tm->tm_min = min;
746                 tm->tm_hour = hour;
747                 tm->tm_mday = day;
748                 tm->tm_mon = mon;
749                 tm->tm_year = year;
750         }
751
752         return 0;
753 }
754
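/*
 * Read the service processor's time of day clock: send a 'get time'
 * (0x40) CE message and sleep until get_rtc_time_complete() fires.
 */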
755 int mf_get_rtc(struct rtc_time *tm)
756 {
757         struct ce_msg_comp_data ce_complete;
758         struct rtc_time_data rtc_data;
759         int rc;
760
761         memset(&ce_complete, 0, sizeof(ce_complete));
762         memset(&rtc_data, 0, sizeof(rtc_data));
763         init_completion(&rtc_data.com);
764         ce_complete.handler = &get_rtc_time_complete;
765         ce_complete.token = &rtc_data;
766         rc = signal_ce_msg_simple(0x40, &ce_complete);
767         if (rc)
768                 return rc;
769         wait_for_completion(&rtc_data.com);
770         return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
771 }
772
773 struct boot_rtc_time_data {
774         int busy;
775         struct ce_msg_data ce_msg;
776         int rc;
777 };
778
779 static void get_boot_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
780 {
781         struct boot_rtc_time_data *rtc = token;
782
783         memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg));
784         rtc->rc = 0;
785         rtc->busy = 0;
786 }
787
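/*
 * Boot-time variant of mf_get_rtc(): interrupts are not enabled yet,
 * so poll the LP event queue instead of sleeping on a completion.
 */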
788 int mf_get_boot_rtc(struct rtc_time *tm)
789 {
790         struct ce_msg_comp_data ce_complete;
791         struct boot_rtc_time_data rtc_data;
792         int rc;
793
794         memset(&ce_complete, 0, sizeof(ce_complete));
795         memset(&rtc_data, 0, sizeof(rtc_data));
796         rtc_data.busy = 1;
797         ce_complete.handler = &get_boot_rtc_time_complete;
798         ce_complete.token = &rtc_data;
799         rc = signal_ce_msg_simple(0x40, &ce_complete);
800         if (rc)
801                 return rc;
802         /* We need to poll here as we are not yet taking interrupts */
803         while (rtc_data.busy) {
804                 if (hvlpevent_is_pending())
805                         process_hvlpevents(NULL);
806         }
807         return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
808 }
809
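/*
 * Set the service processor's time of day clock: encode tm as BCD
 * (century and two-digit year separately) in a 0x41 CE message.
 */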
810 int mf_set_rtc(struct rtc_time *tm)
811 {
812         char ce_time[12];
813         u8 day, mon, hour, min, sec, y1, y2;
814         unsigned year;
815
816         year = 1900 + tm->tm_year;
817         y1 = year / 100;
818         y2 = year % 100;
819
820         sec = tm->tm_sec;
821         min = tm->tm_min;
822         hour = tm->tm_hour;
823         day = tm->tm_mday;
824         mon = tm->tm_mon + 1;
825
826         BIN_TO_BCD(sec);
827         BIN_TO_BCD(min);
828         BIN_TO_BCD(hour);
829         BIN_TO_BCD(mon);
830         BIN_TO_BCD(day);
831         BIN_TO_BCD(y1);
832         BIN_TO_BCD(y2);
833
834         memset(ce_time, 0, sizeof(ce_time));
835         ce_time[3] = 0x41;
836         ce_time[4] = y1;
837         ce_time[5] = y2;
838         ce_time[6] = sec;
839         ce_time[7] = min;
840         ce_time[8] = hour;
841         ce_time[10] = day;
842         ce_time[11] = mon;
843
844         return signal_ce_msg(ce_time, NULL);
845 }
846
847 #ifdef CONFIG_PROC_FS
848
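/*
 * /proc read handler: fetch the kernel command line stored for this
 * side from the VSP (command 33) into a DMA buffer and return it.
 */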
849 static int proc_mf_dump_cmdline(char *page, char **start, off_t off,
850                 int count, int *eof, void *data)
851 {
852         int len;
853         char *p;
854         struct vsp_cmd_data vsp_cmd;
855         int rc;
856         dma_addr_t dma_addr;
857
858         /* The HV appears to return no more than 256 bytes of command line */
859         if (off >= 256)
860                 return 0;
861         if ((off + count) > 256)
862                 count = 256 - off;
863
864         dma_addr = dma_map_single(iSeries_vio_dev, page, off + count,
865                         DMA_FROM_DEVICE);
866         if (dma_mapping_error(dma_addr))
867                 return -ENOMEM;
868         memset(page, 0, off + count);
869         memset(&vsp_cmd, 0, sizeof(vsp_cmd));
870         vsp_cmd.cmd = 33;
871         vsp_cmd.sub_data.kern.token = dma_addr;
872         vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
873         vsp_cmd.sub_data.kern.side = (u64)data;
874         vsp_cmd.sub_data.kern.length = off + count;
875         mb();
876         rc = signal_vsp_instruction(&vsp_cmd);
877         dma_unmap_single(iSeries_vio_dev, dma_addr, off + count,
878                         DMA_FROM_DEVICE);
879         if (rc)
880                 return rc;
881         if (vsp_cmd.result_code != 0)
882                 return -ENOMEM;
883         p = page;
884         len = 0;
885         while (len < (off + count)) {
886                 if ((*p == '\0') || (*p == '\n')) {
887                         if (*p == '\0')
888                                 *p = '\n';
889                         p++;
890                         len++;
891                         *eof = 1;
892                         break;
893                 }
894                 p++;
895                 len++;
896         }
897
898         if (len < off) {
899                 *eof = 1;
900                 len = 0;
901         }
902         return len;
903 }
904
905 #if 0
906 static int mf_getVmlinuxChunk(char *buffer, int *size, int offset, u64 side)
907 {
908         struct vsp_cmd_data vsp_cmd;
909         int rc;
910         int len = *size;
911         dma_addr_t dma_addr;
912
913         dma_addr = dma_map_single(iSeries_vio_dev, buffer, len,
914                         DMA_FROM_DEVICE);
915         memset(buffer, 0, len);
916         memset(&vsp_cmd, 0, sizeof(vsp_cmd));
917         vsp_cmd.cmd = 32;
918         vsp_cmd.sub_data.kern.token = dma_addr;
919         vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
920         vsp_cmd.sub_data.kern.side = side;
921         vsp_cmd.sub_data.kern.offset = offset;
922         vsp_cmd.sub_data.kern.length = len;
923         mb();
924         rc = signal_vsp_instruction(&vsp_cmd);
925         if (rc == 0) {
926                 if (vsp_cmd.result_code == 0)
927                         *size = vsp_cmd.sub_data.length_out;
928                 else
929                         rc = -ENOMEM;
930         }
931
932         dma_unmap_single(iSeries_vio_dev, dma_addr, len, DMA_FROM_DEVICE);
933
934         return rc;
935 }
936
937 static int proc_mf_dump_vmlinux(char *page, char **start, off_t off,
938                 int count, int *eof, void *data)
939 {
940         int sizeToGet = count;
941
942         if (!capable(CAP_SYS_ADMIN))
943                 return -EACCES;
944
945         if (mf_getVmlinuxChunk(page, &sizeToGet, off, (u64)data) == 0) {
946                 if (sizeToGet != 0) {
947                         *start = page + off;
948                         return sizeToGet;
949                 }
950                 *eof = 1;
951                 return 0;
952         }
953         *eof = 1;
954         return 0;
955 }
956 #endif
957
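/*
 * /proc read handler: report the IPL side as a single character
 * 'A'..'D', as returned by VSP command 2 (get IPL type).
 */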
958 static int proc_mf_dump_side(char *page, char **start, off_t off,
959                 int count, int *eof, void *data)
960 {
961         int len;
962         char mf_current_side = ' ';
963         struct vsp_cmd_data vsp_cmd;
964
965         memset(&vsp_cmd, 0, sizeof(vsp_cmd));
966         vsp_cmd.cmd = 2;
967         vsp_cmd.sub_data.ipl_type = 0;
968         mb();
969
970         if (signal_vsp_instruction(&vsp_cmd) == 0) {
971                 if (vsp_cmd.result_code == 0) {
972                         switch (vsp_cmd.sub_data.ipl_type) {
973                         case 0: mf_current_side = 'A';
974                                 break;
975                         case 1: mf_current_side = 'B';
976                                 break;
977                         case 2: mf_current_side = 'C';
978                                 break;
979                         default:        mf_current_side = 'D';
980                                 break;
981                         }
982                 }
983         }
984
985         len = sprintf(page, "%c\n", mf_current_side);
986
987         if (len <= (off + count))
988                 *eof = 1;
989         *start = page + off;
990         len -= off;
991         if (len > count)
992                 len = count;
993         if (len < 0)
994                 len = 0;
995         return len;
996 }
997
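/*
 * /proc write handler: a single character 'A'..'D' asks the VSP
 * (command 10) to select the corresponding IPL side.
 */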
998 static int proc_mf_change_side(struct file *file, const char __user *buffer,
999                 unsigned long count, void *data)
1000 {
1001         char side;
1002         u64 newSide;
1003         struct vsp_cmd_data vsp_cmd;
1004
1005         if (!capable(CAP_SYS_ADMIN))
1006                 return -EACCES;
1007
1008         if (count == 0)
1009                 return 0;
1010
1011         if (get_user(side, buffer))
1012                 return -EFAULT;
1013
1014         switch (side) {
1015         case 'A':       newSide = 0;
1016                         break;
1017         case 'B':       newSide = 1;
1018                         break;
1019         case 'C':       newSide = 2;
1020                         break;
1021         case 'D':       newSide = 3;
1022                         break;
1023         default:
1024                 printk(KERN_ERR "mf.c: proc_mf_change_side: invalid side\n");
1025                 return -EINVAL;
1026         }
1027
1028         memset(&vsp_cmd, 0, sizeof(vsp_cmd));
1029         vsp_cmd.sub_data.ipl_type = newSide;
1030         vsp_cmd.cmd = 10;
1031
1032         (void)signal_vsp_instruction(&vsp_cmd);
1033
1034         return count;
1035 }
1036
1037 #if 0
1038 static void mf_getSrcHistory(char *buffer, int size)
1039 {
1040         struct IplTypeReturnStuff return_stuff;
1041         struct pending_event *ev = new_pending_event();
1042         int rc = 0;
1043         char *pages[4];
1044
1045         pages[0] = kmalloc(4096, GFP_ATOMIC);
1046         pages[1] = kmalloc(4096, GFP_ATOMIC);
1047         pages[2] = kmalloc(4096, GFP_ATOMIC);
1048         pages[3] = kmalloc(4096, GFP_ATOMIC);
1049         if ((ev == NULL) || (pages[0] == NULL) || (pages[1] == NULL)
1050                          || (pages[2] == NULL) || (pages[3] == NULL))
1051         return;
1052
1053         return_stuff.xType = 0;
1054         return_stuff.xRc = 0;
1055         return_stuff.xDone = 0;
1056         ev->event.hp_lp_event.xSubtype = 6;
1057         ev->event.hp_lp_event.x.xSubtypeData =
1058                 subtype_data('M', 'F', 'V', 'I');
1059         ev->event.data.vsp_cmd.xEvent = &return_stuff;
1060         ev->event.data.vsp_cmd.cmd = 4;
1061         ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
1062         ev->event.data.vsp_cmd.result_code = 0xFF;
1063         ev->event.data.vsp_cmd.reserved = 0;
1064         ev->event.data.vsp_cmd.sub_data.page[0] = ISERIES_HV_ADDR(pages[0]);
1065         ev->event.data.vsp_cmd.sub_data.page[1] = ISERIES_HV_ADDR(pages[1]);
1066         ev->event.data.vsp_cmd.sub_data.page[2] = ISERIES_HV_ADDR(pages[2]);
1067         ev->event.data.vsp_cmd.sub_data.page[3] = ISERIES_HV_ADDR(pages[3]);
1068         mb();
1069         if (signal_event(ev) != 0)
1070                 return;
1071
1072         while (return_stuff.xDone != 1)
1073                 udelay(10);
1074         if (return_stuff.xRc == 0)
1075                 memcpy(buffer, pages[0], size);
1076         kfree(pages[0]);
1077         kfree(pages[1]);
1078         kfree(pages[2]);
1079         kfree(pages[3]);
1080 }
1081 #endif
1082
1083 static int proc_mf_dump_src(char *page, char **start, off_t off,
1084                 int count, int *eof, void *data)
1085 {
1086 #if 0
1087         int len;
1088
1089         mf_getSrcHistory(page, count);
1090         len = count;
1091         len -= off;
1092         if (len < count) {
1093                 *eof = 1;
1094                 if (len <= 0)
1095                         return 0;
1096         } else
1097                 len = count;
1098         *start = page + off;
1099         return len;
1100 #else
1101         return 0;
1102 #endif
1103 }
1104
1105 static int proc_mf_change_src(struct file *file, const char __user *buffer,
1106                 unsigned long count, void *data)
1107 {
1108         char stkbuf[10];
1109
1110         if (!capable(CAP_SYS_ADMIN))
1111                 return -EACCES;
1112
1113         if ((count < 4) && (count != 1)) {
1114                 printk(KERN_ERR "mf_proc: invalid src\n");
1115                 return -EINVAL;
1116         }
1117
1118         if (count > (sizeof(stkbuf) - 1))
1119                 count = sizeof(stkbuf) - 1;
1120         if (copy_from_user(stkbuf, buffer, count))
1121                 return -EFAULT;
1122
1123         if ((count == 1) && (*stkbuf == '\0'))
1124                 mf_clear_src();
1125         else
1126                 mf_display_src(*(u32 *)stkbuf);
1127
1128         return count;
1129 }
1130
1131 static int proc_mf_change_cmdline(struct file *file, const char __user *buffer,
1132                 unsigned long count, void *data)
1133 {
1134         struct vsp_cmd_data vsp_cmd;
1135         dma_addr_t dma_addr;
1136         char *page;
1137         int ret = -EACCES;
1138
1139         if (!capable(CAP_SYS_ADMIN))
1140                 goto out;
1141
1142         dma_addr = 0;
1143         page = dma_alloc_coherent(iSeries_vio_dev, count, &dma_addr,
1144                         GFP_ATOMIC);
1145         ret = -ENOMEM;
1146         if (page == NULL)
1147                 goto out;
1148
1149         ret = -EFAULT;
1150         if (copy_from_user(page, buffer, count))
1151                 goto out_free;
1152
1153         memset(&vsp_cmd, 0, sizeof(vsp_cmd));
1154         vsp_cmd.cmd = 31;
1155         vsp_cmd.sub_data.kern.token = dma_addr;
1156         vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
1157         vsp_cmd.sub_data.kern.side = (u64)data;
1158         vsp_cmd.sub_data.kern.length = count;
1159         mb();
1160         (void)signal_vsp_instruction(&vsp_cmd);
1161         ret = count;
1162
1163 out_free:
1164         dma_free_coherent(iSeries_vio_dev, count, page, dma_addr);
1165 out:
1166         return ret;
1167 }
1168
1169 static ssize_t proc_mf_change_vmlinux(struct file *file,
1170                                       const char __user *buf,
1171                                       size_t count, loff_t *ppos)
1172 {
1173         struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
1174         ssize_t rc;
1175         dma_addr_t dma_addr;
1176         char *page;
1177         struct vsp_cmd_data vsp_cmd;
1178
1179         rc = -EACCES;
1180         if (!capable(CAP_SYS_ADMIN))
1181                 goto out;
1182
1183         dma_addr = 0;
1184         page = dma_alloc_coherent(iSeries_vio_dev, count, &dma_addr,
1185                         GFP_ATOMIC);
1186         rc = -ENOMEM;
1187         if (page == NULL) {
1188                 printk(KERN_ERR "mf.c: couldn't allocate memory to set vmlinux chunk\n");
1189                 goto out;
1190         }
1191         rc = -EFAULT;
1192         if (copy_from_user(page, buf, count))
1193                 goto out_free;
1194
1195         memset(&vsp_cmd, 0, sizeof(vsp_cmd));
1196         vsp_cmd.cmd = 30;
1197         vsp_cmd.sub_data.kern.token = dma_addr;
1198         vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
1199         vsp_cmd.sub_data.kern.side = (u64)dp->data;
1200         vsp_cmd.sub_data.kern.offset = *ppos;
1201         vsp_cmd.sub_data.kern.length = count;
1202         mb();
1203         rc = signal_vsp_instruction(&vsp_cmd);
1204         if (rc)
1205                 goto out_free;
1206         rc = -ENOMEM;
1207         if (vsp_cmd.result_code != 0)
1208                 goto out_free;
1209
1210         *ppos += count;
1211         rc = count;
1212 out_free:
1213         dma_free_coherent(iSeries_vio_dev, count, page, dma_addr);
1214 out:
1215         return rc;
1216 }
1217
1218 static struct file_operations proc_vmlinux_operations = {
1219         .write          = proc_mf_change_vmlinux,
1220 };
1221
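/*
 * Create /proc/iSeries/mf/{A,B,C,D}/cmdline, a vmlinux entry for
 * sides A-C, and the top level side and src files.
 */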
1222 static int __init mf_proc_init(void)
1223 {
1224         struct proc_dir_entry *mf_proc_root;
1225         struct proc_dir_entry *ent;
1226         struct proc_dir_entry *mf;
1227         char name[2];
1228         int i;
1229
1230         mf_proc_root = proc_mkdir("iSeries/mf", NULL);
1231         if (!mf_proc_root)
1232                 return 1;
1233
1234         name[1] = '\0';
1235         for (i = 0; i < 4; i++) {
1236                 name[0] = 'A' + i;
1237                 mf = proc_mkdir(name, mf_proc_root);
1238                 if (!mf)
1239                         return 1;
1240
1241                 ent = create_proc_entry("cmdline", S_IFREG|S_IRUSR|S_IWUSR, mf);
1242                 if (!ent)
1243                         return 1;
1244                 ent->nlink = 1;
1245                 ent->data = (void *)(long)i;
1246                 ent->read_proc = proc_mf_dump_cmdline;
1247                 ent->write_proc = proc_mf_change_cmdline;
1248
1249                 if (i == 3)     /* no vmlinux entry for 'D' */
1250                         continue;
1251
1252                 ent = create_proc_entry("vmlinux", S_IFREG|S_IWUSR, mf);
1253                 if (!ent)
1254                         return 1;
1255                 ent->nlink = 1;
1256                 ent->data = (void *)(long)i;
1257                 ent->proc_fops = &proc_vmlinux_operations;
1258         }
1259
1260         ent = create_proc_entry("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root);
1261         if (!ent)
1262                 return 1;
1263         ent->nlink = 1;
1264         ent->data = (void *)0;
1265         ent->read_proc = proc_mf_dump_side;
1266         ent->write_proc = proc_mf_change_side;
1267
1268         ent = create_proc_entry("src", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root);
1269         if (!ent)
1270                 return 1;
1271         ent->nlink = 1;
1272         ent->data = (void *)0;
1273         ent->read_proc = proc_mf_dump_src;
1274         ent->write_proc = proc_mf_change_src;
1275
1276         return 0;
1277 }
1278
1279 __initcall(mf_proc_init);
1280
1281 #endif /* CONFIG_PROC_FS */