/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *                            IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

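/* Page used for store-event-information (CHSC SEI) data, set up at init. */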
static void *sei_page;

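/* Map a CHSC response code to a Linux errno value; 0x0001 means success. */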
static int chsc_error_from_response(int response)
{
        switch (response) {
        case 0x0001:
                return 0;
        case 0x0002:
        case 0x0003:
        case 0x0006:
        case 0x0007:
        case 0x0008:
        case 0x000a:
                return -EINVAL;
        case 0x0004:
                return -EOPNOTSUPP;
        default:
                return -EIO;
        }
}

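/* Request/response block for the store-subchannel-description chsc (0x0004). */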
struct chsc_ssd_area {
        struct chsc_header request;
        u16 :10;
        u16 ssid:2;
        u16 :4;
        u16 f_sch;        /* first subchannel */
        u16 :16;
        u16 l_sch;        /* last subchannel */
        u32 :32;
        struct chsc_header response;
        u32 :32;
        u8 sch_valid : 1;
        u8 dev_valid : 1;
        u8 st        : 3; /* subchannel type */
        u8 zeroes    : 3;
        u8  unit_addr;    /* unit address */
        u16 devno;        /* device number */
        u8 path_mask;
        u8 fla_valid_mask;
        u16 sch;          /* subchannel */
        u8 chpid[8];      /* chpids 0-7 */
        u16 fla[8];       /* full link addresses 0-7 */
} __attribute__ ((packed));

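/**
 * chsc_get_ssd_info - obtain subchannel description via chsc
 * @schid: id of the subchannel to query
 * @ssd: subchannel description to be filled in
 *
 * Issue a store-subchannel-description chsc for a single subchannel and
 * copy path mask, channel-path IDs and full link addresses to @ssd.
 * Returns 0 on success, a negative error value otherwise.
 */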
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
        unsigned long page;
        struct chsc_ssd_area *ssd_area;
        int ccode;
        int ret;
        int i;
        int mask;

        page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page)
                return -ENOMEM;
        ssd_area = (struct chsc_ssd_area *) page;
        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;
        ssd_area->ssid = schid.ssid;
        ssd_area->f_sch = schid.sch_no;
        ssd_area->l_sch = schid.sch_no;

        ccode = chsc(ssd_area);
        /* Check response. */
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out_free;
        }
        ret = chsc_error_from_response(ssd_area->response.code);
        if (ret != 0) {
                CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
                              schid.ssid, schid.sch_no,
                              ssd_area->response.code);
                goto out_free;
        }
        if (!ssd_area->sch_valid) {
                ret = -ENODEV;
                goto out_free;
        }
        /* Copy data. */
        ret = 0;
        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
            (ssd_area->st != SUBCHANNEL_TYPE_MSG))
                goto out_free;
        ssd->path_mask = ssd_area->path_mask;
        ssd->fla_valid_mask = ssd_area->fla_valid_mask;
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd_area->path_mask & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = ssd_area->chpid[i];
                }
                if (ssd_area->fla_valid_mask & mask)
                        ssd->fla[i] = ssd_area->fla[i];
        }
out_free:
        free_page(page);
        return ret;
}

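/* Return 1 if I/O is active on @sch with @mask as the last path used. */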
static int check_for_io_on_path(struct subchannel *sch, int mask)
{
        int cc;

        cc = stsch(sch->schid, &sch->schib);
        if (cc)
                return 0;
        if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
                return 1;
        return 0;
}

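/*
 * Clear pending internal I/O on @sch and request a retry; if the clear
 * fails, trigger verification or schedule the subchannel for reevaluation.
 */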
static void terminate_internal_io(struct subchannel *sch)
{
        if (cio_clear(sch)) {
                /* Recheck device in case clear failed. */
                sch->lpm = 0;
                if (device_trigger_verify(sch) != 0)
                        css_schedule_eval(sch->schid);
                return;
        }
        /* Request retry of internal operation. */
        device_set_intretry(sch);
        /* Call handler. */
        if (sch->driver && sch->driver->termination)
                sch->driver->termination(sch);
}

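/*
 * Remove the channel path given by @data from the path masks of @sch and
 * terminate or kill any I/O that is still running on that path.
 */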
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
        int j;
        int mask;
        struct chp_id *chpid = data;
        struct schib schib;

        for (j = 0; j < 8; j++) {
                mask = 0x80 >> j;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[j] == chpid->id))
                        break;
        }
        if (j >= 8)
                return 0;

        spin_lock_irq(sch->lock);

        stsch(sch->schid, &schib);
        if (!css_sch_is_valid(&schib))
                goto out_unreg;
        memcpy(&sch->schib, &schib, sizeof(struct schib));
        /* Check for single path devices. */
        if (sch->schib.pmcw.pim == 0x80)
                goto out_unreg;

        if (check_for_io_on_path(sch, mask)) {
                if (device_is_online(sch))
                        device_kill_io(sch);
                else {
                        terminate_internal_io(sch);
                        /* Re-start path verification. */
                        if (sch->driver && sch->driver->verify)
                                sch->driver->verify(sch);
                }
        } else {
                /* Trigger path verification. */
                if (sch->driver && sch->driver->verify)
                        sch->driver->verify(sch);
                else if (sch->lpm == mask)
                        goto out_unreg;
        }

        spin_unlock_irq(sch->lock);
        return 0;

out_unreg:
        sch->lpm = 0;
        spin_unlock_irq(sch->lock);
        css_schedule_eval(sch->schid);
        return 0;
}

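/**
 * chsc_chp_offline - process a channel path that has gone offline
 * @chpid: channel path which went offline
 *
 * Walk all affected subchannels and remove @chpid from their path masks.
 */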
void chsc_chp_offline(struct chp_id chpid)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) <= 0)
                return;
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
        struct schib schib;
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
         * to do recognition again.
         * Since we don't have any idea about which chpid
         * that beast may be on we'll have to do a stsch
         * on all devices, grr...
         */
        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;

        /* Put it on the slow path. */
        css_schedule_eval(schid);
        return 0;
}

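/* Parameters describing a resource-accessibility event. */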
struct res_acc_data {
        struct chp_id chpid;
        u32 fla_mask;
        u16 fla;
};

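/* Return the path mask bit of the path in @ssd matching @data, or 0. */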
static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
                              struct res_acc_data *data)
{
        int i;
        int mask;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (!(ssd->path_mask & mask))
                        continue;
                if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
                        continue;
                if ((ssd->fla_valid_mask & mask) &&
                    ((ssd->fla[i] & data->fla_mask) != data->fla))
                        continue;
                return mask;
        }
        return 0;
}

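/*
 * Recalculate the logical path mask of @sch after one of its channel paths
 * became accessible again and trigger reprobe or path verification.
 */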
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
        int chp_mask, old_lpm;
        struct res_acc_data *res_data = data;

        spin_lock_irq(sch->lock);
        chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
        if (chp_mask == 0)
                goto out;
        if (stsch(sch->schid, &sch->schib))
                goto out;
        old_lpm = sch->lpm;
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | chp_mask) & sch->opm;
        if (!old_lpm && sch->lpm)
                device_trigger_reprobe(sch);
        else if (sch->driver && sch->driver->verify)
                sch->driver->verify(sch);
out:
        spin_unlock_irq(sch->lock);

        return 0;
}

static void s390_process_res_acc(struct res_acc_data *res_data)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
                res_data->chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
        if (res_data->fla != 0) {
                sprintf(dbf_txt, "fla%x", res_data->fla);
                CIO_TRACE_EVENT(2, dbf_txt);
        }
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have, the less scanning
         * we will have to do.
         */
        for_each_subchannel_staged(__s390_process_res_acc,
                                   s390_process_res_acc_new_sch, res_data);
}

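/* Extract the channel-path ID from a link-incident record. */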
static int __get_chpid_from_lir(void *data)
{
        struct lir {
                u8  iq;
                u8  ic;
                u16 sci;
                /* incident-node descriptor */
                u32 indesc[28];
                /* attached-node descriptor */
                u32 andesc[28];
                /* incident-specific information */
                u32 isinfo[28];
        } __attribute__ ((packed)) *lir;

        lir = data;
        if (!(lir->iq & 0x80))
                /* NULL link incident record */
                return -EINVAL;
        if (!(lir->indesc[0] & 0xc0000000))
                /* node descriptor not valid */
                return -EINVAL;
        if (!(lir->indesc[0] & 0x10000000))
                /* don't handle device-type nodes - FIXME */
                return -EINVAL;
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

        return (u16) (lir->indesc[0] & 0x000000ff);
}

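/* Request/response block for the store-event-information chsc (0x000e). */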
struct chsc_sei_area {
        struct chsc_header request;
        u32 reserved1;
        u32 reserved2;
        u32 reserved3;
        struct chsc_header response;
        u32 reserved4;
        u8  flags;
        u8  vf;         /* validity flags */
        u8  rs;         /* reporting source */
        u8  cc;         /* content code */
        u16 fla;        /* full link address */
        u16 rsid;       /* reporting source id */
        u32 reserved5;
        u32 reserved6;
        u8 ccdf[4096 - 16 - 24];        /* content-code dependent field */
        /* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
        struct chp_id chpid;
        int id;

        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                      sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return;
        id = __get_chpid_from_lir(sei_area->ccdf);
        if (id < 0)
                CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
        else {
                chp_id_init(&chpid);
                chpid.id = id;
                chsc_chp_offline(chpid);
        }
}

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
        struct res_acc_data res_data;
        struct chp_id chpid;
        int status;

        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return;
        chp_id_init(&chpid);
        chpid.id = sei_area->rsid;
        /* Allocate a new channel path structure, if needed. */
        status = chp_get_status(chpid);
        if (status < 0)
                chp_new(chpid);
        else if (!status)
                return;
        memset(&res_data, 0, sizeof(struct res_acc_data));
        res_data.chpid = chpid;
        if ((sei_area->vf & 0xc0) != 0) {
                res_data.fla = sei_area->fla;
                if ((sei_area->vf & 0xc0) == 0xc0)
                        /* full link address */
                        res_data.fla_mask = 0xffff;
                else
                        /* link address */
                        res_data.fla_mask = 0xff00;
        }
        s390_process_res_acc(&res_data);
}

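/* Content-code dependent field of a channel-path-configuration event. */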
struct chp_config_data {
        u8 map[32];
        u8 op;
        u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
        struct chp_config_data *data;
        struct chp_id chpid;
        int num;

        CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
        if (sei_area->rs != 0)
                return;
        data = (struct chp_config_data *) &(sei_area->ccdf);
        chp_id_init(&chpid);
        for (num = 0; num <= __MAX_CHPID; num++) {
                if (!chp_test_bit(data->map, num))
                        continue;
                chpid.id = num;
                printk(KERN_WARNING "cio: processing configure event %d for "
                       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
                switch (data->op) {
                case 0:
                        chp_cfg_schedule(chpid, 1);
                        break;
                case 1:
                        chp_cfg_schedule(chpid, 0);
                        break;
                case 2:
                        chp_cfg_cancel_deconfigure(chpid);
                        break;
                }
        }
}

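/* Dispatch a stored event-information block according to its content code. */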
static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
        /* Check if we might have lost some information. */
        if (sei_area->flags & 0x40) {
                CIO_CRW_EVENT(2, "chsc: event overflow\n");
                css_schedule_eval_all();
        }
        /* Which kind of information was stored? */
        switch (sei_area->cc) {
        case 1: /* link incident */
                chsc_process_sei_link_incident(sei_area);
                break;
        case 2: /* i/o resource accessibility */
                chsc_process_sei_res_acc(sei_area);
                break;
        case 8: /* channel-path-configuration notification */
                chsc_process_sei_chp_config(sei_area);
                break;
        default: /* other stuff */
                CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
                              sei_area->cc);
                break;
        }
}

void chsc_process_crw(void)
{
        struct chsc_sei_area *sei_area;

        if (!sei_page)
                return;
        /* Access to sei_page is serialized through machine check handler
         * thread, so no need for locking. */
        sei_area = sei_page;

        CIO_TRACE_EVENT(2, "prcss");
        do {
                memset(sei_area, 0, sizeof(*sei_area));
                sei_area->request.length = 0x0010;
                sei_area->request.code = 0x000e;
                if (chsc(sei_area))
                        break;

                if (sei_area->response.code == 0x0001) {
                        CIO_CRW_EVENT(4, "chsc: sei successful\n");
                        chsc_process_sei(sei_area);
                } else {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                      sei_area->response.code);
                        break;
                }
        } while (sei_area->flags & 0x80);
}

static int __chp_add_new_sch(struct subchannel_id schid, void *data)
{
        struct schib schib;

        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;

        /* Put it on the slow path. */
        css_schedule_eval(schid);
        return 0;
}

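/* Add the channel path given by @data to the logical path mask of @sch. */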
static int __chp_add(struct subchannel *sch, void *data)
{
        int i, mask;
        struct chp_id *chpid = data;

        spin_lock_irq(sch->lock);
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[i] == chpid->id))
                        break;
        }
        if (i == 8) {
                spin_unlock_irq(sch->lock);
                return 0;
        }
        if (stsch(sch->schid, &sch->schib)) {
                spin_unlock_irq(sch->lock);
                css_schedule_eval(sch->schid);
                return 0;
        }
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | mask) & sch->opm;

        if (sch->driver && sch->driver->verify)
                sch->driver->verify(sch);

        spin_unlock_irq(sch->lock);

        return 0;
}

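/**
 * chsc_chp_online - process a channel path that has become available
 * @chpid: channel path which came online
 *
 * Trigger path verification on all subchannels that may use @chpid.
 */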
void chsc_chp_online(struct chp_id chpid)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) != 0) {
                /* Wait until previous actions have settled. */
                css_wait_for_slow_path();
                for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
                                           &chpid);
        }
}

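/*
 * Vary the channel path @chpid on or off for @sch: adjust the opm and lpm
 * masks and deal with any I/O that is still running on the varied path.
 */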
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                                         struct chp_id chpid, int on)
{
        int chp, old_lpm;
        int mask;
        unsigned long flags;

        spin_lock_irqsave(sch->lock, flags);
        old_lpm = sch->lpm;
        for (chp = 0; chp < 8; chp++) {
                mask = 0x80 >> chp;
                if (!(sch->ssd_info.path_mask & mask))
                        continue;
                if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
                        continue;

                if (on) {
                        sch->opm |= mask;
                        sch->lpm |= mask;
                        if (!old_lpm)
                                device_trigger_reprobe(sch);
                        else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(sch);
                        break;
                }
                sch->opm &= ~mask;
                sch->lpm &= ~mask;
                if (check_for_io_on_path(sch, mask)) {
                        if (device_is_online(sch))
                                /* Path verification is done after killing. */
                                device_kill_io(sch);
                        else {
                                /* Kill and retry internal I/O. */
                                terminate_internal_io(sch);
                                /* Re-start path verification. */
                                if (sch->driver && sch->driver->verify)
                                        sch->driver->verify(sch);
                        }
                } else if (!sch->lpm) {
                        if (device_trigger_verify(sch) != 0)
                                css_schedule_eval(sch->schid);
                } else if (sch->driver && sch->driver->verify)
                        sch->driver->verify(sch);
                break;
        }
        spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
        struct chp_id *chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 0);
        return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
        struct chp_id *chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
        return 0;
}

static int __s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
        struct schib schib;

        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;
        /* Put it on the slow path. */
        css_schedule_eval(schid);
        return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /* Redo path verification on the devices the chpid connects to. */
        if (on)
                for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
                                           __s390_vary_chpid_on, &chpid);
        else
                for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
                                           NULL, &chpid);

        return 0;
}

static void chsc_remove_cmg_attr(struct channel_subsystem *css)
{
        int i;

        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
}

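/* Add measurement attributes for all channel paths; undo them on failure. */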
static int chsc_add_cmg_attr(struct channel_subsystem *css)
{
        int i, ret;

        ret = 0;
        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                ret = chp_add_cmg_attr(css->chps[i]);
                if (ret)
                        goto cleanup;
        }
        return ret;
cleanup:
        for (--i; i >= 0; i--) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
        return ret;
}

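/* Issue a set-channel-monitor chsc (0x0016) to start or stop measurement. */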
static int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
        struct {
                struct chsc_header request;
                u32 operation_code : 2;
                u32 : 30;
                u32 key : 4;
                u32 : 28;
                u32 zeroes1;
                u32 cub_addr1;
                u32 zeroes2;
                u32 cub_addr2;
                u32 reserved[13];
                struct chsc_header response;
                u32 status : 8;
                u32 : 4;
                u32 fmt : 4;
                u32 : 16;
        } __attribute__ ((packed)) *secm_area;
        int ret, ccode;

        secm_area = page;
        secm_area->request.length = 0x0050;
        secm_area->request.code = 0x0016;

        secm_area->key = PAGE_DEFAULT_KEY;
        secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
        secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

        secm_area->operation_code = enable ? 0 : 1;

        ccode = chsc(secm_area);
        if (ccode > 0)
                return (ccode == 3) ? -ENODEV : -EBUSY;

        switch (secm_area->response.code) {
        case 0x0102:
        case 0x0103:
                ret = -EINVAL;
                break;
        default:
                ret = chsc_error_from_response(secm_area->response.code);
        }
        if (ret != 0)
                CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
                              secm_area->response.code);
        return ret;
}

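/*
 * Enable or disable channel measurement for @css and create or remove the
 * corresponding measurement attributes.
 */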
int chsc_secm(struct channel_subsystem *css, int enable)
{
        void *secm_area;
        int ret;

        secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!secm_area)
                return -ENOMEM;

        if (enable && !css->cm_enabled) {
                css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!css->cub_addr1 || !css->cub_addr2) {
                        free_page((unsigned long)css->cub_addr1);
                        free_page((unsigned long)css->cub_addr2);
                        free_page((unsigned long)secm_area);
                        return -ENOMEM;
                }
        }
        ret = __chsc_do_secm(css, enable, secm_area);
        if (!ret) {
                css->cm_enabled = enable;
                if (css->cm_enabled) {
                        ret = chsc_add_cmg_attr(css);
                        if (ret) {
                                memset(secm_area, 0, PAGE_SIZE);
                                __chsc_do_secm(css, 0, secm_area);
                                css->cm_enabled = 0;
                        }
                } else
                        chsc_remove_cmg_attr(css);
        }
        if (!css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);
        }
        free_page((unsigned long)secm_area);
        return ret;
}

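/* Store the description of the channel path @chpid in @desc. */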
int chsc_determine_channel_path_description(struct chp_id chpid,
                                            struct channel_path_desc *desc)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                struct channel_path_desc desc;
        } __attribute__ ((packed)) *scpd_area;

        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scpd_area)
                return -ENOMEM;

        scpd_area->request.length = 0x0010;
        scpd_area->request.code = 0x0002;

        scpd_area->first_chpid = chpid.id;
        scpd_area->last_chpid = chpid.id;

        ccode = chsc(scpd_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        ret = chsc_error_from_response(scpd_area->response.code);
        if (ret == 0)
                /* Success. */
                memcpy(desc, &scpd_area->desc,
                       sizeof(struct channel_path_desc));
        else
                CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
                              scpd_area->response.code);
out:
        free_page((unsigned long)scpd_area);
        return ret;
}

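/* Copy the measurement characteristics that @cmcv marks valid to @chp. */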
static void chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                                      struct cmg_chars *chars)
{
        switch (chp->cmg) {
        case 2:
        case 3:
                chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
                                         GFP_KERNEL);
                if (chp->cmg_chars) {
                        int i, mask;
                        struct cmg_chars *cmg_chars;

                        cmg_chars = chp->cmg_chars;
                        for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                                mask = 0x80 >> (i + 3);
                                if (cmcv & mask)
                                        cmg_chars->values[i] = chars->values[i];
                                else
                                        cmg_chars->values[i] = 0;
                        }
                }
                break;
        default:
                /* No cmg-dependent data. */
                break;
        }
}

int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                u32 not_valid : 1;
                u32 shared : 1;
                u32 : 22;
                u32 chpid : 8;
                u32 cmcv : 5;
                u32 : 11;
                u32 cmgq : 8;
                u32 cmg : 8;
                u32 zeroes3;
                u32 data[NR_MEASUREMENT_CHARS];
        } __attribute__ ((packed)) *scmc_area;

        scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scmc_area)
                return -ENOMEM;

        scmc_area->request.length = 0x0010;
        scmc_area->request.code = 0x0022;

        scmc_area->first_chpid = chp->chpid.id;
        scmc_area->last_chpid = chp->chpid.id;

        ccode = chsc(scmc_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        ret = chsc_error_from_response(scmc_area->response.code);
        if (ret == 0) {
                /* Success. */
                if (!scmc_area->not_valid) {
                        chp->cmg = scmc_area->cmg;
                        chp->shared = scmc_area->shared;
                        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                                  (struct cmg_chars *)
                                                  &scmc_area->data);
                } else {
                        chp->cmg = -1;
                        chp->shared = -1;
                }
        } else {
                CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
                              scmc_area->response.code);
        }
out:
        free_page((unsigned long)scmc_area);
        return ret;
}

int __init chsc_alloc_sei_area(void)
{
        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sei_page)
                CIO_MSG_EVENT(0, "Can't allocate page for processing of "
                              "chsc machine checks!\n");
        return (sei_page ? 0 : -ENOMEM);
}

void __init chsc_free_sei_area(void)
{
        /* sei_page was obtained from get_zeroed_page(), not kmalloc(). */
        free_page((unsigned long)sei_page);
}

int __init chsc_enable_facility(int operation_code)
{
        int ret;
        struct {
                struct chsc_header request;
                u8 reserved1:4;
                u8 format:4;
                u8 reserved2;
                u16 operation_code;
                u32 reserved3;
                u32 reserved4;
                u32 operation_data_area[252];
                struct chsc_header response;
                u32 reserved5:4;
                u32 format2:4;
                u32 reserved6:24;
        } __attribute__ ((packed)) *sda_area;

        sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sda_area)
                return -ENOMEM;
        sda_area->request.length = 0x0400;
        sda_area->request.code = 0x0031;
        sda_area->operation_code = operation_code;

        ret = chsc(sda_area);
        if (ret > 0) {
                ret = (ret == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (sda_area->response.code) {
        case 0x0101:
                ret = -EOPNOTSUPP;
                break;
        default:
                ret = chsc_error_from_response(sda_area->response.code);
        }
        if (ret != 0)
                CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
                              operation_code, sda_area->response.code);
out:
        free_page((unsigned long)sda_area);
        return ret;
}

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

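/* Retrieve the general and chsc characteristics of the channel subsystem. */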
int __init chsc_determine_css_characteristics(void)
{
        int result;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u32 general_char[510];
                u32 chsc_char[518];
        } __attribute__ ((packed)) *scsc_area;

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scsc_area)
                return -ENOMEM;

        scsc_area->request.length = 0x0010;
        scsc_area->request.code = 0x0010;

        result = chsc(scsc_area);
        if (result) {
                result = (result == 3) ? -ENODEV : -EBUSY;
                goto exit;
        }

        result = chsc_error_from_response(scsc_area->response.code);
        if (result == 0) {
                memcpy(&css_general_characteristics, scsc_area->general_char,
                       sizeof(css_general_characteristics));
                memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
                       sizeof(css_chsc_characteristics));
        } else
                CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
                              scsc_area->response.code);
exit:
        free_page((unsigned long)scsc_area);
        return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);