/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *                            IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;

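/*
 * Map a chsc response code to a Linux errno: response code 0x0001 means
 * success, the codes translated to -EINVAL denote invalid or rejected
 * requests, 0x0004 an unsupported command, and anything else is treated
 * as a generic I/O error.
 */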
static int chsc_error_from_response(int response)
{
        switch (response) {
        case 0x0001:
                return 0;
        case 0x0002:
        case 0x0003:
        case 0x0006:
        case 0x0007:
        case 0x0008:
        case 0x000a:
                return -EINVAL;
        case 0x0004:
                return -EOPNOTSUPP;
        default:
                return -EIO;
        }
}

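/*
 * Request/response block for the store-subchannel-description chsc
 * (request code 0x0004), used by chsc_get_ssd_info() below. The layout
 * is dictated by the hardware interface; the unnamed bit fields are
 * reserved.
 */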
struct chsc_ssd_area {
        struct chsc_header request;
        u16 :10;
        u16 ssid:2;
        u16 :4;
        u16 f_sch;        /* first subchannel */
        u16 :16;
        u16 l_sch;        /* last subchannel */
        u32 :32;
        struct chsc_header response;
        u32 :32;
        u8 sch_valid : 1;
        u8 dev_valid : 1;
        u8 st        : 3; /* subchannel type */
        u8 zeroes    : 3;
        u8  unit_addr;    /* unit address */
        u16 devno;        /* device number */
        u8 path_mask;
        u8 fla_valid_mask;
        u16 sch;          /* subchannel */
        u8 chpid[8];      /* chpids 0-7 */
        u16 fla[8];       /* full link addresses 0-7 */
} __attribute__ ((packed));

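/**
 * chsc_get_ssd_info - store subchannel description data
 * @schid: id of the subchannel to query
 * @ssd: area to store path mask, chpids and full link addresses in
 *
 * Issue the store-subchannel-description chsc for one subchannel and
 * copy the path-relevant fields to @ssd. Returns 0 on success and a
 * negative errno if the chsc fails or the subchannel is invalid.
 */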
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
        unsigned long page;
        struct chsc_ssd_area *ssd_area;
        int ccode;
        int ret;
        int i;
        int mask;

        page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page)
                return -ENOMEM;
        ssd_area = (struct chsc_ssd_area *) page;
        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;
        ssd_area->ssid = schid.ssid;
        ssd_area->f_sch = schid.sch_no;
        ssd_area->l_sch = schid.sch_no;

        ccode = chsc(ssd_area);
        /* Check response. */
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out_free;
        }
        ret = chsc_error_from_response(ssd_area->response.code);
        if (ret != 0) {
                CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
                              schid.ssid, schid.sch_no,
                              ssd_area->response.code);
                goto out_free;
        }
        if (!ssd_area->sch_valid) {
                ret = -ENODEV;
                goto out_free;
        }
        /* Copy data */
        ret = 0;
        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
            (ssd_area->st != SUBCHANNEL_TYPE_MSG))
                goto out_free;
        ssd->path_mask = ssd_area->path_mask;
        ssd->fla_valid_mask = ssd_area->fla_valid_mask;
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd_area->path_mask & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = ssd_area->chpid[i];
                }
                if (ssd_area->fla_valid_mask & mask)
                        ssd->fla[i] = ssd_area->fla[i];
        }
out_free:
        free_page(page);
        return ret;
}

static int check_for_io_on_path(struct subchannel *sch, int mask)
{
        int cc;

        cc = stsch(sch->schid, &sch->schib);
        if (cc)
                return 0;
        if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
                return 1;
        return 0;
}

static void terminate_internal_io(struct subchannel *sch)
{
        if (cio_clear(sch)) {
                /* Recheck device in case clear failed. */
                sch->lpm = 0;
                if (device_trigger_verify(sch) != 0)
                        css_schedule_eval(sch->schid);
                return;
        }
        /* Request retry of internal operation. */
        device_set_intretry(sch);
        /* Call handler. */
        if (sch->driver && sch->driver->termination)
                sch->driver->termination(sch);
}

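/*
 * Helper for chsc_chp_offline(), called for each registered subchannel:
 * if the subchannel uses the channel path that went away, terminate any
 * I/O still running on that path and restart path verification, or
 * schedule the subchannel for evaluation if no usable path remains.
 */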
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
        int j;
        int mask;
        struct chp_id *chpid = data;
        struct schib schib;

        for (j = 0; j < 8; j++) {
                mask = 0x80 >> j;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[j] == chpid->id))
                        break;
        }
        if (j >= 8)
                return 0;

        spin_lock_irq(sch->lock);

        stsch(sch->schid, &schib);
        if (!css_sch_is_valid(&schib))
                goto out_unreg;
        memcpy(&sch->schib, &schib, sizeof(struct schib));
        /* Check for single path devices. */
        if (sch->schib.pmcw.pim == 0x80)
                goto out_unreg;

        if (check_for_io_on_path(sch, mask)) {
                if (device_is_online(sch))
                        device_kill_io(sch);
                else {
                        terminate_internal_io(sch);
                        /* Re-start path verification. */
                        if (sch->driver && sch->driver->verify)
                                sch->driver->verify(sch);
                }
        } else {
                /* Trigger path verification. */
                if (sch->driver && sch->driver->verify)
                        sch->driver->verify(sch);
                else if (sch->lpm == mask)
                        goto out_unreg;
        }

        spin_unlock_irq(sch->lock);
        return 0;

out_unreg:
        sch->lpm = 0;
        spin_unlock_irq(sch->lock);
        css_schedule_eval(sch->schid);
        return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) <= 0)
                return;
        for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
        struct schib schib;
        /*
         * We don't know the device yet, but since a path may now be
         * available to it, we'll have to do recognition again. Since
         * we don't have any idea which chpid that beast may be on,
         * we'll have to do a stsch on all devices, grr...
         */
        if (stsch_err(schid, &schib))
                /* We're through. */
                return -ENXIO;

        /* Put it on the slow path. */
        css_schedule_eval(schid);
        return 0;
}

struct res_acc_data {
        struct chp_id chpid;
        u32 fla_mask;
        u16 fla;
};

static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
                              struct res_acc_data *data)
{
        int i;
        int mask;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (!(ssd->path_mask & mask))
                        continue;
                if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
                        continue;
                if ((ssd->fla_valid_mask & mask) &&
                    ((ssd->fla[i] & data->fla_mask) != data->fla))
                        continue;
                return mask;
        }
        return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
        int chp_mask, old_lpm;
        struct res_acc_data *res_data = data;

        spin_lock_irq(sch->lock);
        chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
        if (chp_mask == 0)
                goto out;
        if (stsch(sch->schid, &sch->schib))
                goto out;
        old_lpm = sch->lpm;
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | chp_mask) & sch->opm;
        if (!old_lpm && sch->lpm)
                device_trigger_reprobe(sch);
        else if (sch->driver && sch->driver->verify)
                sch->driver->verify(sch);
out:
        spin_unlock_irq(sch->lock);

        return 0;
}

static void s390_process_res_acc(struct res_acc_data *res_data)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
                res_data->chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
        if (res_data->fla != 0) {
                sprintf(dbf_txt, "fla%x", res_data->fla);
                CIO_TRACE_EVENT(2, dbf_txt);
        }

        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have, the less scanning
         * we will have to do.
         */
        for_each_subchannel_staged(__s390_process_res_acc,
                                   s390_process_res_acc_new_sch, res_data);
}

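/*
 * Extract the chpid from a link-incident record. Returns the chpid
 * (byte 3 of the first incident-node descriptor word) or -EINVAL if
 * the record or its incident-node descriptor is not valid.
 */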
static int __get_chpid_from_lir(void *data)
{
        struct lir {
                u8  iq;
                u8  ic;
                u16 sci;
                /* incident-node descriptor */
                u32 indesc[28];
                /* attached-node descriptor */
                u32 andesc[28];
                /* incident-specific information */
                u32 isinfo[28];
        } __attribute__ ((packed)) *lir;

        lir = data;
        if (!(lir->iq & 0x80))
                /* NULL link incident record */
                return -EINVAL;
        if (!(lir->indesc[0] & 0xc0000000))
                /* node descriptor not valid */
                return -EINVAL;
        if (!(lir->indesc[0] & 0x10000000))
                /* don't handle device-type nodes - FIXME */
                return -EINVAL;
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

        return (u16) (lir->indesc[0] & 0x000000ff);
}

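/*
 * Request/response block for the store-event-information chsc (request
 * code 0x000e). The content-code dependent field ccdf takes up the rest
 * of the 4K page so that a full link-incident record fits into it.
 */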
struct chsc_sei_area {
        struct chsc_header request;
        u32 reserved1;
        u32 reserved2;
        u32 reserved3;
        struct chsc_header response;
        u32 reserved4;
        u8  flags;
        u8  vf;         /* validity flags */
        u8  rs;         /* reporting source */
        u8  cc;         /* content code */
        u16 fla;        /* full link address */
        u16 rsid;       /* reporting source id */
        u32 reserved5;
        u32 reserved6;
        u8 ccdf[4096 - 16 - 24];        /* content-code dependent field */
        /* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
        struct chp_id chpid;
        int id;

        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                      sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return;
        id = __get_chpid_from_lir(sei_area->ccdf);
        if (id < 0)
                CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
        else {
                chp_id_init(&chpid);
                chpid.id = id;
                chsc_chp_offline(chpid);
        }
}

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
        struct res_acc_data res_data;
        struct chp_id chpid;
        int status;

        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return;
        chp_id_init(&chpid);
        chpid.id = sei_area->rsid;
        /* Allocate a new channel path structure, if needed. */
        status = chp_get_status(chpid);
        if (status < 0)
                chp_new(chpid);
        else if (!status)
                return;
        memset(&res_data, 0, sizeof(struct res_acc_data));
        res_data.chpid = chpid;
        if ((sei_area->vf & 0xc0) != 0) {
                res_data.fla = sei_area->fla;
                if ((sei_area->vf & 0xc0) == 0xc0)
                        /* full link address */
                        res_data.fla_mask = 0xffff;
                else
                        /* link address */
                        res_data.fla_mask = 0xff00;
        }
        s390_process_res_acc(&res_data);
}

struct chp_config_data {
        u8 map[32];
        u8 op;
        u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
        struct chp_config_data *data;
        struct chp_id chpid;
        int num;

        CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
        if (sei_area->rs != 0)
                return;
        data = (struct chp_config_data *) &(sei_area->ccdf);
        chp_id_init(&chpid);
        for (num = 0; num <= __MAX_CHPID; num++) {
                if (!chp_test_bit(data->map, num))
                        continue;
                chpid.id = num;
                printk(KERN_WARNING "cio: processing configure event %d for "
                       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
                switch (data->op) {
                case 0:
                        chp_cfg_schedule(chpid, 1);
                        break;
                case 1:
                        chp_cfg_schedule(chpid, 0);
                        break;
                case 2:
                        chp_cfg_cancel_deconfigure(chpid);
                        break;
                }
        }
}

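/*
 * Dispatch one store-event-information response by content code, and
 * schedule an evaluation of all subchannels first if the overflow flag
 * indicates that event information may have been lost.
 */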
static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
        /* Check if we might have lost some information. */
        if (sei_area->flags & 0x40) {
                CIO_CRW_EVENT(2, "chsc: event overflow\n");
                css_schedule_eval_all();
        }
        /* Which kind of information was stored? */
        switch (sei_area->cc) {
        case 1: /* link incident */
                chsc_process_sei_link_incident(sei_area);
                break;
        case 2: /* i/o resource accessibility */
                chsc_process_sei_res_acc(sei_area);
                break;
        case 8: /* channel-path-configuration notification */
                chsc_process_sei_chp_config(sei_area);
                break;
        default: /* other stuff */
                CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
                              sei_area->cc);
                break;
        }
}

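/*
 * Handle a channel-report word that announces pending chsc event
 * information: repeatedly issue store-event-information and process
 * each stored event until the flags byte no longer signals further
 * pending information (bit 0x80).
 */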
void chsc_process_crw(void)
{
        struct chsc_sei_area *sei_area;

        if (!sei_page)
                return;
        /* Access to sei_page is serialized through the machine check
         * handler thread, so no need for locking. */
        sei_area = sei_page;

        CIO_TRACE_EVENT(2, "prcss");
        do {
                memset(sei_area, 0, sizeof(*sei_area));
                sei_area->request.length = 0x0010;
                sei_area->request.code = 0x000e;
                if (chsc(sei_area))
                        break;

                if (sei_area->response.code == 0x0001) {
                        CIO_CRW_EVENT(4, "chsc: sei successful\n");
                        chsc_process_sei(sei_area);
                } else {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                      sei_area->response.code);
                        break;
                }
        } while (sei_area->flags & 0x80);
}

static int __chp_add_new_sch(struct subchannel_id schid, void *data)
{
        struct schib schib;

        if (stsch_err(schid, &schib))
                /* We're through. */
                return -ENXIO;

        /* Put it on the slow path. */
        css_schedule_eval(schid);
        return 0;
}

static int __chp_add(struct subchannel *sch, void *data)
{
        int i, mask;
        struct chp_id *chpid = data;

        spin_lock_irq(sch->lock);
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[i] == chpid->id))
                        break;
        }
        if (i == 8) {
                spin_unlock_irq(sch->lock);
                return 0;
        }
        if (stsch(sch->schid, &sch->schib)) {
                spin_unlock_irq(sch->lock);
                css_schedule_eval(sch->schid);
                return 0;
        }
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | mask) & sch->opm;

        if (sch->driver && sch->driver->verify)
                sch->driver->verify(sch);

        spin_unlock_irq(sch->lock);

        return 0;
}

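/*
 * Called when a channel path has become available again: re-add the
 * path to the logical path mask of all subchannels that use it and
 * start path verification; subchannels not yet known are scheduled
 * for evaluation via the slow path.
 */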
void chsc_chp_online(struct chp_id chpid)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) != 0)
                for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
                                           &chpid);
}

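/*
 * Apply a vary on/off of @chpid to a single subchannel: adjust opm and
 * lpm for the matching path mask bit, terminate I/O still using a
 * varied-off path, and restart path verification as needed.
 */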
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                                         struct chp_id chpid, int on)
{
        int chp, old_lpm;
        int mask;
        unsigned long flags;

        spin_lock_irqsave(sch->lock, flags);
        old_lpm = sch->lpm;
        for (chp = 0; chp < 8; chp++) {
                mask = 0x80 >> chp;
                if (!(sch->ssd_info.path_mask & mask))
                        continue;
                if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
                        continue;

                if (on) {
                        sch->opm |= mask;
                        sch->lpm |= mask;
                        if (!old_lpm)
                                device_trigger_reprobe(sch);
                        else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(sch);
                        break;
                }
                sch->opm &= ~mask;
                sch->lpm &= ~mask;
                if (check_for_io_on_path(sch, mask)) {
                        if (device_is_online(sch))
                                /* Path verification is done after killing. */
                                device_kill_io(sch);
                        else {
                                /* Kill and retry internal I/O. */
                                terminate_internal_io(sch);
                                /* Re-start path verification. */
                                if (sch->driver && sch->driver->verify)
                                        sch->driver->verify(sch);
                        }
                } else if (!sch->lpm) {
                        if (device_trigger_verify(sch) != 0)
                                css_schedule_eval(sch->schid);
                } else if (sch->driver && sch->driver->verify)
                        sch->driver->verify(sch);
                break;
        }
        spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
        struct chp_id *chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 0);
        return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
        struct chp_id *chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
        return 0;
}

static int __s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
        struct schib schib;

        if (stsch_err(schid, &schib))
                /* We're through. */
                return -ENXIO;
        /* Put it on the slow path. */
        css_schedule_eval(schid);
        return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
        /*
         * Redo path verification on the devices the chpid connects to.
         */
        if (on)
                for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
                                           __s390_vary_chpid_on, &chpid);
        else
                for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
                                           NULL, &chpid);

        return 0;
}

static void chsc_remove_cmg_attr(struct channel_subsystem *css)
{
        int i;

        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
}

static int chsc_add_cmg_attr(struct channel_subsystem *css)
{
        int i, ret;

        ret = 0;
        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                ret = chp_add_cmg_attr(css->chps[i]);
                if (ret)
                        goto cleanup;
        }
        return ret;
cleanup:
        for (--i; i >= 0; i--) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
        return ret;
}

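/*
 * Issue the secm chsc (request code 0x0016) to enable (operation
 * code 0) or disable (operation code 1) channel measurement for @css,
 * passing the addresses of the two measurement data areas cub_addr1
 * and cub_addr2 in the request block.
 */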
static int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
        struct {
                struct chsc_header request;
                u32 operation_code : 2;
                u32 : 30;
                u32 key : 4;
                u32 : 28;
                u32 zeroes1;
                u32 cub_addr1;
                u32 zeroes2;
                u32 cub_addr2;
                u32 reserved[13];
                struct chsc_header response;
                u32 status : 8;
                u32 : 4;
                u32 fmt : 4;
                u32 : 16;
        } __attribute__ ((packed)) *secm_area;
        int ret, ccode;

        secm_area = page;
        secm_area->request.length = 0x0050;
        secm_area->request.code = 0x0016;

        secm_area->key = PAGE_DEFAULT_KEY;
        secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
        secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

        secm_area->operation_code = enable ? 0 : 1;

        ccode = chsc(secm_area);
        if (ccode > 0)
                return (ccode == 3) ? -ENODEV : -EBUSY;

        switch (secm_area->response.code) {
        case 0x0102:
        case 0x0103:
                ret = -EINVAL;
                break;
        default:
                ret = chsc_error_from_response(secm_area->response.code);
        }
        if (ret != 0)
                CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
                              secm_area->response.code);
        return ret;
}

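/**
 * chsc_secm - enable or disable channel measurement
 * @css: channel subsystem to operate on
 * @enable: non-zero to enable, zero to disable
 *
 * Allocate the measurement areas if necessary, issue the secm chsc and
 * add or remove the per-channel-path cmg attributes accordingly.
 * Returns 0 on success and a negative errno otherwise.
 */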
int chsc_secm(struct channel_subsystem *css, int enable)
{
        void *secm_area;
        int ret;

        secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!secm_area)
                return -ENOMEM;

        mutex_lock(&css->mutex);
        if (enable && !css->cm_enabled) {
                css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!css->cub_addr1 || !css->cub_addr2) {
                        free_page((unsigned long)css->cub_addr1);
                        free_page((unsigned long)css->cub_addr2);
                        free_page((unsigned long)secm_area);
                        mutex_unlock(&css->mutex);
                        return -ENOMEM;
                }
        }
        ret = __chsc_do_secm(css, enable, secm_area);
        if (!ret) {
                css->cm_enabled = enable;
                if (css->cm_enabled) {
                        ret = chsc_add_cmg_attr(css);
                        if (ret) {
                                memset(secm_area, 0, PAGE_SIZE);
                                __chsc_do_secm(css, 0, secm_area);
                                css->cm_enabled = 0;
                        }
                } else
                        chsc_remove_cmg_attr(css);
        }
        if (!css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);
        }
        mutex_unlock(&css->mutex);
        free_page((unsigned long)secm_area);
        return ret;
}

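/**
 * chsc_determine_channel_path_description - get channel-path description
 * @chpid: channel path to describe
 * @desc: where to store the description
 *
 * Issue the store-channel-path-description chsc (request code 0x0002)
 * for a single chpid and copy the returned descriptor to @desc.
 * Returns 0 on success and a negative errno otherwise.
 */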
int chsc_determine_channel_path_description(struct chp_id chpid,
                                            struct channel_path_desc *desc)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                struct channel_path_desc desc;
        } __attribute__ ((packed)) *scpd_area;

        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scpd_area)
                return -ENOMEM;

        scpd_area->request.length = 0x0010;
        scpd_area->request.code = 0x0002;

        scpd_area->first_chpid = chpid.id;
        scpd_area->last_chpid = chpid.id;

        ccode = chsc(scpd_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        ret = chsc_error_from_response(scpd_area->response.code);
        if (ret == 0)
                /* Success. */
                memcpy(desc, &scpd_area->desc,
                       sizeof(struct channel_path_desc));
        else
                CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
                              scpd_area->response.code);
out:
        free_page((unsigned long)scpd_area);
        return ret;
}

static void chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                                      struct cmg_chars *chars)
{
        switch (chp->cmg) {
        case 2:
        case 3:
                chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
                                         GFP_KERNEL);
                if (chp->cmg_chars) {
                        int i, mask;
                        struct cmg_chars *cmg_chars;

                        cmg_chars = chp->cmg_chars;
                        for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                                mask = 0x80 >> (i + 3);
                                if (cmcv & mask)
                                        cmg_chars->values[i] = chars->values[i];
                                else
                                        cmg_chars->values[i] = 0;
                        }
                }
                break;
        default:
                /* No cmg-dependent data. */
                break;
        }
}

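/*
 * Issue the scmc chsc (request code 0x0022) to retrieve the channel
 * measurement characteristics of @chp. On success, cmg and shared are
 * taken from the response and the cmg-dependent characteristics are
 * stored via chsc_initialize_cmg_chars(); if the response is flagged
 * not valid, cmg and shared are set to -1.
 */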
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                u32 not_valid : 1;
                u32 shared : 1;
                u32 : 22;
                u32 chpid : 8;
                u32 cmcv : 5;
                u32 : 11;
                u32 cmgq : 8;
                u32 cmg : 8;
                u32 zeroes3;
                u32 data[NR_MEASUREMENT_CHARS];
        } __attribute__ ((packed)) *scmc_area;

        scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scmc_area)
                return -ENOMEM;

        scmc_area->request.length = 0x0010;
        scmc_area->request.code = 0x0022;

        scmc_area->first_chpid = chp->chpid.id;
        scmc_area->last_chpid = chp->chpid.id;

        ccode = chsc(scmc_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        ret = chsc_error_from_response(scmc_area->response.code);
        if (ret == 0) {
                /* Success. */
                if (!scmc_area->not_valid) {
                        chp->cmg = scmc_area->cmg;
                        chp->shared = scmc_area->shared;
                        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                                  (struct cmg_chars *)
                                                  &scmc_area->data);
                } else {
                        chp->cmg = -1;
                        chp->shared = -1;
                }
        } else {
                CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
                              scmc_area->response.code);
        }
out:
        free_page((unsigned long)scmc_area);
        return ret;
}

int __init chsc_alloc_sei_area(void)
{
        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sei_page)
                CIO_MSG_EVENT(0, "Can't allocate page for processing of "
                              "chsc machine checks!\n");
        return (sei_page ? 0 : -ENOMEM);
}

void __init chsc_free_sei_area(void)
{
        /* sei_page was allocated with get_zeroed_page(), not kmalloc(). */
        free_page((unsigned long)sei_page);
}

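/*
 * Enable a facility via the sda chsc (request code 0x0031); the
 * operation code selects the facility. Response code 0x0101 is mapped
 * to -EOPNOTSUPP, i.e. the facility is not available on this machine.
 */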
int __init chsc_enable_facility(int operation_code)
{
        int ret;
        struct {
                struct chsc_header request;
                u8 reserved1:4;
                u8 format:4;
                u8 reserved2;
                u16 operation_code;
                u32 reserved3;
                u32 reserved4;
                u32 operation_data_area[252];
                struct chsc_header response;
                u32 reserved5:4;
                u32 format2:4;
                u32 reserved6:24;
        } __attribute__ ((packed)) *sda_area;

        sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sda_area)
                return -ENOMEM;
        sda_area->request.length = 0x0400;
        sda_area->request.code = 0x0031;
        sda_area->operation_code = operation_code;

        ret = chsc(sda_area);
        if (ret > 0) {
                ret = (ret == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (sda_area->response.code) {
        case 0x0101:
                ret = -EOPNOTSUPP;
                break;
        default:
                ret = chsc_error_from_response(sda_area->response.code);
        }
        if (ret != 0)
                CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
                              operation_code, sda_area->response.code);
out:
        free_page((unsigned long)sda_area);
        return ret;
}

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

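/*
 * Issue the scsc chsc (request code 0x0010) once at boot to retrieve
 * the general and chsc characteristics of the channel subsystem and
 * cache them in the global structures exported below.
 */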
int __init chsc_determine_css_characteristics(void)
{
        int result;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u32 general_char[510];
                u32 chsc_char[518];
        } __attribute__ ((packed)) *scsc_area;

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scsc_area)
                return -ENOMEM;

        scsc_area->request.length = 0x0010;
        scsc_area->request.code = 0x0010;

        result = chsc(scsc_area);
        if (result) {
                result = (result == 3) ? -ENODEV : -EBUSY;
                goto exit;
        }

        result = chsc_error_from_response(scsc_area->response.code);
        if (result == 0) {
                memcpy(&css_general_characteristics, scsc_area->general_char,
                       sizeof(css_general_characteristics));
                memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
                       sizeof(css_chsc_characteristics));
        } else
                CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
                              scsc_area->response.code);
exit:
        free_page((unsigned long)scsc_area);
        return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);