[S390] cio: Repair chpid event handling.
/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright IBM Corp. 1999,2008
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>

#include "../s390mach.h"
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;

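/*
 * Map a CHSC response code to a Linux error code.  Response code 0x0001
 * means success; the other recognized codes indicate an invalid or
 * unsupported request.
 */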
static int chsc_error_from_response(int response)
{
        switch (response) {
        case 0x0001:
                return 0;
        case 0x0002:
        case 0x0003:
        case 0x0006:
        case 0x0007:
        case 0x0008:
        case 0x000a:
                return -EINVAL;
        case 0x0004:
                return -EOPNOTSUPP;
        default:
                return -EIO;
        }
}

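/*
 * Request/response block for the store-subchannel-description CHSC
 * command (request code 0x0004).
 */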
struct chsc_ssd_area {
        struct chsc_header request;
        u16 :10;
        u16 ssid:2;
        u16 :4;
        u16 f_sch;        /* first subchannel */
        u16 :16;
        u16 l_sch;        /* last subchannel */
        u32 :32;
        struct chsc_header response;
        u32 :32;
        u8 sch_valid : 1;
        u8 dev_valid : 1;
        u8 st        : 3; /* subchannel type */
        u8 zeroes    : 3;
        u8  unit_addr;    /* unit address */
        u16 devno;        /* device number */
        u8 path_mask;
        u8 fla_valid_mask;
        u16 sch;          /* subchannel */
        u8 chpid[8];      /* chpids 0-7 */
        u16 fla[8];       /* full link addresses 0-7 */
} __attribute__ ((packed));

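/**
 * chsc_get_ssd_info - store subchannel description
 * @schid: id of the subchannel to query
 * @ssd: resulting subchannel description
 *
 * Issue a store-subchannel-description CHSC for @schid and copy the
 * path mask, chpids and full link addresses to @ssd.  Returns 0 on
 * success, a negative error code otherwise.
 */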
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
        unsigned long page;
        struct chsc_ssd_area *ssd_area;
        int ccode;
        int ret;
        int i;
        int mask;

        page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page)
                return -ENOMEM;
        ssd_area = (struct chsc_ssd_area *) page;
        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;
        ssd_area->ssid = schid.ssid;
        ssd_area->f_sch = schid.sch_no;
        ssd_area->l_sch = schid.sch_no;

        ccode = chsc(ssd_area);
        /* Check response. */
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out_free;
        }
        ret = chsc_error_from_response(ssd_area->response.code);
        if (ret != 0) {
                CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
                              schid.ssid, schid.sch_no,
                              ssd_area->response.code);
                goto out_free;
        }
        if (!ssd_area->sch_valid) {
                ret = -ENODEV;
                goto out_free;
        }
        /* Copy data */
        ret = 0;
        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
            (ssd_area->st != SUBCHANNEL_TYPE_MSG))
                goto out_free;
        ssd->path_mask = ssd_area->path_mask;
        ssd->fla_valid_mask = ssd_area->fla_valid_mask;
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd_area->path_mask & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = ssd_area->chpid[i];
                }
                if (ssd_area->fla_valid_mask & mask)
                        ssd->fla[i] = ssd_area->fla[i];
        }
out_free:
        free_page(page);
        return ret;
}

static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
        spin_lock_irq(sch->lock);
        if (sch->driver && sch->driver->chp_event)
                if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
                        goto out_unreg;
        spin_unlock_irq(sch->lock);
        return 0;

out_unreg:
        sch->lpm = 0;
        spin_unlock_irq(sch->lock);
        css_schedule_eval(sch->schid);
        return 0;
}

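/*
 * Notify all subchannels that a channel path has gone offline; any
 * subchannel whose driver requests it is scheduled for re-evaluation.
 */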
void chsc_chp_offline(struct chp_id chpid)
{
        char dbf_txt[15];
        struct chp_link link;

        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) <= 0)
                return;
        memset(&link, 0, sizeof(struct chp_link));
        link.chpid = chpid;
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
        struct schib schib;
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
         * to do recognition again.
         * Since we don't have any idea about which chpid
         * that beast may be on we'll have to do a stsch
         * on all devices, grr...
         */
        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;

        /* Put it on the slow path. */
        css_schedule_eval(schid);
        return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
        spin_lock_irq(sch->lock);
        if (sch->driver && sch->driver->chp_event)
                sch->driver->chp_event(sch, data, CHP_ONLINE);
        spin_unlock_irq(sch->lock);

        return 0;
}

static void s390_process_res_acc(struct chp_link *link)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
                link->chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
        if (link->fla != 0) {
                sprintf(dbf_txt, "fla%x", link->fla);
                CIO_TRACE_EVENT(2, dbf_txt);
        }
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have (info), the less scanning
         * we will have to do.
         */
        for_each_subchannel_staged(__s390_process_res_acc,
                                   s390_process_res_acc_new_sch, link);
}

static int
__get_chpid_from_lir(void *data)
{
        struct lir {
                u8  iq;
                u8  ic;
                u16 sci;
                /* incident-node descriptor */
                u32 indesc[28];
                /* attached-node descriptor */
                u32 andesc[28];
                /* incident-specific information */
                u32 isinfo[28];
        } __attribute__ ((packed)) *lir;

        lir = data;
        if (!(lir->iq & 0x80))
                /* NULL link incident record */
                return -EINVAL;
        if (!(lir->indesc[0] & 0xc0000000))
                /* node descriptor not valid */
                return -EINVAL;
        if (!(lir->indesc[0] & 0x10000000))
                /* don't handle device-type nodes - FIXME */
                return -EINVAL;
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

        return (u16) (lir->indesc[0] & 0x000000ff);
}

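/*
 * Request/response block for the store-event-information CHSC
 * command (request code 0x000e).
 */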
struct chsc_sei_area {
        struct chsc_header request;
        u32 reserved1;
        u32 reserved2;
        u32 reserved3;
        struct chsc_header response;
        u32 reserved4;
        u8  flags;
        u8  vf;         /* validity flags */
        u8  rs;         /* reporting source */
        u8  cc;         /* content code */
        u16 fla;        /* full link address */
        u16 rsid;       /* reporting source id */
        u32 reserved5;
        u32 reserved6;
        u8 ccdf[4096 - 16 - 24];        /* content-code dependent field */
        /* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
        struct chp_id chpid;
        int id;

        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                      sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return;
        id = __get_chpid_from_lir(sei_area->ccdf);
        if (id < 0)
                CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
        else {
                chp_id_init(&chpid);
                chpid.id = id;
                chsc_chp_offline(chpid);
        }
}

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
        struct chp_link link;
        struct chp_id chpid;
        int status;

        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return;
        chp_id_init(&chpid);
        chpid.id = sei_area->rsid;
        /* allocate a new channel path structure, if needed */
        status = chp_get_status(chpid);
        if (status < 0)
                chp_new(chpid);
        else if (!status)
                return;
        memset(&link, 0, sizeof(struct chp_link));
        link.chpid = chpid;
        if ((sei_area->vf & 0xc0) != 0) {
                link.fla = sei_area->fla;
                if ((sei_area->vf & 0xc0) == 0xc0)
                        /* full link address */
                        link.fla_mask = 0xffff;
                else
                        /* link address */
                        link.fla_mask = 0xff00;
        }
        s390_process_res_acc(&link);
}

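/*
 * Content-code dependent field of a channel-path-configuration
 * notification (content code 8): a bitmap of affected chpids plus the
 * requested operation (0 = configure, 1 = deconfigure, 2 = cancel).
 */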
struct chp_config_data {
        u8 map[32];
        u8 op;
        u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
        struct chp_config_data *data;
        struct chp_id chpid;
        int num;

        CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
        if (sei_area->rs != 0)
                return;
        data = (struct chp_config_data *) &(sei_area->ccdf);
        chp_id_init(&chpid);
        for (num = 0; num <= __MAX_CHPID; num++) {
                if (!chp_test_bit(data->map, num))
                        continue;
                chpid.id = num;
                printk(KERN_WARNING "cio: processing configure event %d for "
                       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
                switch (data->op) {
                case 0:
                        chp_cfg_schedule(chpid, 1);
                        break;
                case 1:
                        chp_cfg_schedule(chpid, 0);
                        break;
                case 2:
                        chp_cfg_cancel_deconfigure(chpid);
                        break;
                }
        }
}

static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
        /* Check if we might have lost some information. */
        if (sei_area->flags & 0x40) {
                CIO_CRW_EVENT(2, "chsc: event overflow\n");
                css_schedule_eval_all();
        }
        /* which kind of information was stored? */
        switch (sei_area->cc) {
        case 1: /* link incident */
                chsc_process_sei_link_incident(sei_area);
                break;
        case 2: /* i/o resource accessibility */
                chsc_process_sei_res_acc(sei_area);
                break;
        case 8: /* channel-path-configuration notification */
                chsc_process_sei_chp_config(sei_area);
                break;
        default: /* other stuff */
                CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
                              sei_area->cc);
                break;
        }
}

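/*
 * CRW handler for the channel subsystem: issue store event information
 * repeatedly until the hardware signals that no further events are
 * pending (bit 0x80 of the flags field is no longer set).
 */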
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
        struct chsc_sei_area *sei_area;

        if (overflow) {
                css_schedule_eval_all();
                return;
        }
        CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
                      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
                      crw0->erc, crw0->rsid);
        if (!sei_page)
                return;
        /* Access to sei_page is serialized through machine check handler
         * thread, so no need for locking. */
        sei_area = sei_page;

        CIO_TRACE_EVENT(2, "prcss");
        do {
                memset(sei_area, 0, sizeof(*sei_area));
                sei_area->request.length = 0x0010;
                sei_area->request.code = 0x000e;
                if (chsc(sei_area))
                        break;

                if (sei_area->response.code == 0x0001) {
                        CIO_CRW_EVENT(4, "chsc: sei successful\n");
                        chsc_process_sei(sei_area);
                } else {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                      sei_area->response.code);
                        break;
                }
        } while (sei_area->flags & 0x80);
}

void chsc_chp_online(struct chp_id chpid)
{
        char dbf_txt[15];
        struct chp_link link;

        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) != 0) {
                memset(&link, 0, sizeof(struct chp_link));
                link.chpid = chpid;
                /* Wait until previous actions have settled. */
                css_wait_for_slow_path();
                for_each_subchannel_staged(__s390_process_res_acc, NULL,
                                           &link);
        }
}

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                                         struct chp_id chpid, int on)
{
        unsigned long flags;
        struct chp_link link;

        memset(&link, 0, sizeof(struct chp_link));
        link.chpid = chpid;
        spin_lock_irqsave(sch->lock, flags);
        if (sch->driver && sch->driver->chp_event)
                sch->driver->chp_event(sch, &link,
                                       on ? CHP_VARY_ON : CHP_VARY_OFF);
        spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
        struct chp_id *chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 0);
        return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
        struct chp_id *chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
        return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
        struct schib schib;

        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;
        /* Put it on the slow path. */
        css_schedule_eval(schid);
        return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
        struct chp_link link;

        memset(&link, 0, sizeof(struct chp_link));
        link.chpid = chpid;
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /*
         * Redo PathVerification on the devices the chpid connects to
         */
        if (on)
                for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
                                           __s390_vary_chpid_on, &link);
        else
                for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
                                           NULL, &link);

        return 0;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
        int i;

        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
        int i, ret;

        ret = 0;
        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                ret = chp_add_cmg_attr(css->chps[i]);
                if (ret)
                        goto cleanup;
        }
        return ret;
cleanup:
        for (--i; i >= 0; i--) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
        return ret;
}

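/*
 * Issue a SECM CHSC (request code 0x0016) to switch channel-path
 * measurement on or off, passing the channel utilization block
 * addresses of @css to the hardware.
 */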
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
        struct {
                struct chsc_header request;
                u32 operation_code : 2;
                u32 : 30;
                u32 key : 4;
                u32 : 28;
                u32 zeroes1;
                u32 cub_addr1;
                u32 zeroes2;
                u32 cub_addr2;
                u32 reserved[13];
                struct chsc_header response;
                u32 status : 8;
                u32 : 4;
                u32 fmt : 4;
                u32 : 16;
        } __attribute__ ((packed)) *secm_area;
        int ret, ccode;

        secm_area = page;
        secm_area->request.length = 0x0050;
        secm_area->request.code = 0x0016;

        secm_area->key = PAGE_DEFAULT_KEY;
        secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
        secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

        secm_area->operation_code = enable ? 0 : 1;

        ccode = chsc(secm_area);
        if (ccode > 0)
                return (ccode == 3) ? -ENODEV : -EBUSY;

        switch (secm_area->response.code) {
        case 0x0102:
        case 0x0103:
                ret = -EINVAL;
                break;
        default:
                ret = chsc_error_from_response(secm_area->response.code);
        }
        if (ret != 0)
                CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
                              secm_area->response.code);
        return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
        void *secm_area;
        int ret;

        secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!secm_area)
                return -ENOMEM;

        if (enable && !css->cm_enabled) {
                css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!css->cub_addr1 || !css->cub_addr2) {
                        free_page((unsigned long)css->cub_addr1);
                        free_page((unsigned long)css->cub_addr2);
                        free_page((unsigned long)secm_area);
                        return -ENOMEM;
                }
        }
        ret = __chsc_do_secm(css, enable, secm_area);
        if (!ret) {
                css->cm_enabled = enable;
                if (css->cm_enabled) {
                        ret = chsc_add_cmg_attr(css);
                        if (ret) {
                                memset(secm_area, 0, PAGE_SIZE);
                                __chsc_do_secm(css, 0, secm_area);
                                css->cm_enabled = 0;
                        }
                } else
                        chsc_remove_cmg_attr(css);
        }
        if (!css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);
        }
        free_page((unsigned long)secm_area);
        return ret;
}

int chsc_determine_channel_path_description(struct chp_id chpid,
                                            struct channel_path_desc *desc)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                struct channel_path_desc desc;
        } __attribute__ ((packed)) *scpd_area;

        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scpd_area)
                return -ENOMEM;

        scpd_area->request.length = 0x0010;
        scpd_area->request.code = 0x0002;

        scpd_area->first_chpid = chpid.id;
        scpd_area->last_chpid = chpid.id;

        ccode = chsc(scpd_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        ret = chsc_error_from_response(scpd_area->response.code);
        if (ret == 0)
                /* Success. */
                memcpy(desc, &scpd_area->desc,
                       sizeof(struct channel_path_desc));
        else
                CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
                              scpd_area->response.code);
out:
        free_page((unsigned long)scpd_area);
        return ret;
}

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                          struct cmg_chars *chars)
{
        switch (chp->cmg) {
        case 2:
        case 3:
                chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
                                         GFP_KERNEL);
                if (chp->cmg_chars) {
                        int i, mask;
                        struct cmg_chars *cmg_chars;

                        cmg_chars = chp->cmg_chars;
                        for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                                mask = 0x80 >> (i + 3);
                                if (cmcv & mask)
                                        cmg_chars->values[i] = chars->values[i];
                                else
                                        cmg_chars->values[i] = 0;
                        }
                }
                break;
        default:
                /* No cmg-dependent data. */
                break;
        }
}

int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                u32 not_valid : 1;
                u32 shared : 1;
                u32 : 22;
                u32 chpid : 8;
                u32 cmcv : 5;
                u32 : 11;
                u32 cmgq : 8;
                u32 cmg : 8;
                u32 zeroes3;
                u32 data[NR_MEASUREMENT_CHARS];
        } __attribute__ ((packed)) *scmc_area;

        scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scmc_area)
                return -ENOMEM;

        scmc_area->request.length = 0x0010;
        scmc_area->request.code = 0x0022;

        scmc_area->first_chpid = chp->chpid.id;
        scmc_area->last_chpid = chp->chpid.id;

        ccode = chsc(scmc_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        ret = chsc_error_from_response(scmc_area->response.code);
        if (ret == 0) {
                /* Success. */
                if (!scmc_area->not_valid) {
                        chp->cmg = scmc_area->cmg;
                        chp->shared = scmc_area->shared;
                        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                                  (struct cmg_chars *)
                                                  &scmc_area->data);
                } else {
                        chp->cmg = -1;
                        chp->shared = -1;
                }
        } else {
                CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
                              scmc_area->response.code);
        }
out:
        free_page((unsigned long)scmc_area);
        return ret;
}

int __init chsc_alloc_sei_area(void)
{
        int ret;

        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sei_page) {
                CIO_MSG_EVENT(0, "Can't allocate page for processing of "
                              "chsc machine checks!\n");
                return -ENOMEM;
        }
        ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw);
        if (ret)
                /* Page allocator memory must be freed with free_page(). */
                free_page((unsigned long)sei_page);
        return ret;
}

void __init chsc_free_sei_area(void)
{
        s390_unregister_crw_handler(CRW_RSC_CSS);
        free_page((unsigned long)sei_page);
}

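/*
 * Enable an optional CHSC facility via the SDA command (request code
 * 0x0031); used at boot time, e.g. to enable multiple subchannel sets.
 */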
int __init
chsc_enable_facility(int operation_code)
{
        int ret;
        struct {
                struct chsc_header request;
                u8 reserved1:4;
                u8 format:4;
                u8 reserved2;
                u16 operation_code;
                u32 reserved3;
                u32 reserved4;
                u32 operation_data_area[252];
                struct chsc_header response;
                u32 reserved5:4;
                u32 format2:4;
                u32 reserved6:24;
        } __attribute__ ((packed)) *sda_area;

        sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sda_area)
                return -ENOMEM;
        sda_area->request.length = 0x0400;
        sda_area->request.code = 0x0031;
        sda_area->operation_code = operation_code;

        ret = chsc(sda_area);
        if (ret > 0) {
                ret = (ret == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (sda_area->response.code) {
        case 0x0101:
                ret = -EOPNOTSUPP;
                break;
        default:
                ret = chsc_error_from_response(sda_area->response.code);
        }
        if (ret != 0)
                CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
                              operation_code, sda_area->response.code);
out:
        free_page((unsigned long)sda_area);
        return ret;
}

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
        int result;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u32 general_char[510];
                u32 chsc_char[518];
        } __attribute__ ((packed)) *scsc_area;

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scsc_area)
                return -ENOMEM;

        scsc_area->request.length = 0x0010;
        scsc_area->request.code = 0x0010;

        result = chsc(scsc_area);
        if (result) {
                result = (result == 3) ? -ENODEV : -EBUSY;
                goto exit;
        }

        result = chsc_error_from_response(scsc_area->response.code);
        if (result == 0) {
                memcpy(&css_general_characteristics, scsc_area->general_char,
                       sizeof(css_general_characteristics));
                memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
                       sizeof(css_chsc_characteristics));
        } else
                CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
                              scsc_area->response.code);
exit:
        free_page((unsigned long)scsc_area);
        return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);

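/*
 * Set STP (Server Time Protocol) controls: CHSC request code 0x0033.
 * The caller provides a 4k page that is used as the request block.
 */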
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
        struct {
                struct chsc_header request;
                unsigned int rsvd0;
                unsigned int op : 8;
                unsigned int rsvd1 : 8;
                unsigned int ctrl : 16;
                unsigned int rsvd2[5];
                struct chsc_header response;
                unsigned int rsvd3[7];
        } __attribute__ ((packed)) *rr;
        int rc;

        memset(page, 0, PAGE_SIZE);
        rr = page;
        rr->request.length = 0x0020;
        rr->request.code = 0x0033;
        rr->op = op;
        rr->ctrl = ctrl;
        rc = chsc(rr);
        if (rc)
                return -EIO;
        rc = (rr->response.code == 0x0001) ? 0 : -EIO;
        return rc;
}

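/*
 * Store STP information: CHSC request code 0x0038.  On success, @size
 * bytes of the response data are copied to @result.
 */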
int chsc_sstpi(void *page, void *result, size_t size)
{
        struct {
                struct chsc_header request;
                unsigned int rsvd0[3];
                struct chsc_header response;
                char data[size];
        } __attribute__ ((packed)) *rr;
        int rc;

        memset(page, 0, PAGE_SIZE);
        rr = page;
        rr->request.length = 0x0010;
        rr->request.code = 0x0038;
        rc = chsc(rr);
        if (rc)
                return -EIO;
        memcpy(result, &rr->data, size);
        return (rr->response.code == 0x0001) ? 0 : -EIO;
}
932