/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include "cio_debug.h"
static void *sei_page;

static int new_channel_path(int chpid);

set_chp_logically_online(int chp, int onoff)
	css[0]->chps[chp]->state = onoff;

get_chp_status(int chp)
	return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);

chsc_validate_chpids(struct subchannel *sch)
	for (chp = 0; chp <= 7; chp++) {
		if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
			/* disable using this path */

chpid_is_actually_online(int chp)
	state = get_chp_status(chp);
		queue_work(slow_path_wq, &slow_path_work);

/* FIXME: this is _always_ called for every subchannel. shouldn't we
 * process more than one at a time? */
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
		struct chsc_header request;
		u16 f_sch; /* first subchannel */
		u16 l_sch; /* last subchannel */
		struct chsc_header response;
		u8 st : 3; /* subchannel type */
		u8 unit_addr; /* unit address */
		u16 devno; /* device number */
		u16 sch; /* subchannel */
		u8 chpid[8]; /* chpids 0-7 */
		u16 fla[8]; /* full link addresses 0-7 */

	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;

	ssd_area->ssid = sch->schid.ssid;
	ssd_area->f_sch = sch->schid.sch_no;
	ssd_area->l_sch = sch->schid.sch_no;

	ccode = chsc(ssd_area);
		pr_debug("chsc returned with ccode = %d\n", ccode);
		return (ccode == 3) ? -ENODEV : -EBUSY;
	switch (ssd_area->response.code) {
	case 0x0001: /* everything ok */
		CIO_CRW_EVENT(2, "Invalid command!\n");
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		CIO_CRW_EVENT(2, "Model does not provide ssd\n");
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      ssd_area->response.code);

	/*
	 * ssd_area->st stores the type of the detected
	 * subchannel, with the following definitions:
	 * 0: I/O subchannel: All fields have meaning
	 * 1: CHSC subchannel: Only sch_val, st and sch
	 * 2: Message subchannel: All fields except unit_addr
	 * 3: ADM subchannel: Only sch_val, st and sch
	 * Other types are currently undefined.
	 */
	if (ssd_area->st > 3) { /* uhm, that looks strange... */
		CIO_CRW_EVENT(0, "Strange subchannel type %d"
			      " for sch 0.%x.%04x\n", ssd_area->st,
			      sch->schid.ssid, sch->schid.sch_no);
		/*
		 * There may have been a new subchannel type defined in the
		 * time since this code was written; since we don't know which
		 * fields have meaning and what to do with it we just jump out
		 */
		const char *type[4] = {"I/O", "chsc", "message", "ADM"};
		CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
			      sch->schid.ssid, sch->schid.sch_no,

		sch->ssd_info.valid = 1;
		sch->ssd_info.type = ssd_area->st;

	if (ssd_area->st == 0 || ssd_area->st == 2) {
		for (j = 0; j < 8; j++) {
			if (!((0x80 >> j) & ssd_area->path_mask &
			      ssd_area->fla_valid_mask))
			sch->ssd_info.chpid[j] = ssd_area->chpid[j];
			sch->ssd_info.fla[j] = ssd_area->fla[j];
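/*
 * Illustrative sketch (not part of the original driver): path j of a
 * subchannel is represented by bit (0x80 >> j), so path 0 maps to 0x80 and
 * path 7 to 0x01.  An ssd entry for path j is only recorded above when that
 * bit is set in both the path mask and the fla validity mask.  The helper
 * name is made up for illustration.
 */
static inline int example_ssd_entry_usable(u8 path_mask, u8 fla_valid_mask,
					   int j)
{
	return ((0x80 >> j) & path_mask & fla_valid_mask) != 0;
}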
css_get_ssd_info(struct subchannel *sch)
	page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	spin_lock_irq(&sch->lock);
	ret = chsc_get_sch_desc_irq(sch, page);
		static int cio_chsc_err_msg;

		if (!cio_chsc_err_msg) {
			       "chsc_get_sch_descriptions:"
			       " Error %d while doing chsc; "
			       "processing some machine checks may "
			cio_chsc_err_msg = 1;
	spin_unlock_irq(&sch->lock);
	free_page((unsigned long)page);
		/* Allocate channel path structures, if needed. */
		for (j = 0; j < 8; j++) {
			chpid = sch->ssd_info.chpid[j];
			if ((sch->schib.pmcw.pim & mask) &&
			    (get_chp_status(chpid) < 0))
				new_channel_path(chpid);
s390_subchannel_remove_chpid(struct device *dev, void *data)
	struct subchannel *sch;
	struct channel_path *chpid;

	sch = to_subchannel(dev);
	for (j = 0; j < 8; j++) {
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[j] == chpid->id))

	spin_lock_irq(&sch->lock);

	stsch(sch->schid, &schib);
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)

	if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
	    (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
	    (sch->schib.pmcw.lpum == mask)) {
		/* Request retry of internal operation. */
		device_set_intretry(sch);
		if (sch->driver && sch->driver->termination)
			sch->driver->termination(&sch->dev);

	/* trigger path verification. */
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
	else if (sch->lpm == mask)

	spin_unlock_irq(&sch->lock);

	spin_unlock_irq(&sch->lock);

	if (css_enqueue_subchannel_slow(sch->schid)) {
		css_clear_subchannel_slow_list();
s390_set_chpid_offline( __u8 chpid)
	sprintf(dbf_txt, "chpr%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (get_chp_status(chpid) <= 0)

	dev = get_device(&css[0]->chps[chpid]->dev);
	bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
			 s390_subchannel_remove_chpid);

	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
struct res_acc_data {
	struct channel_path *chp;

s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
	for (chp = 0; chp <= 7; chp++)
		/*
		 * check if chpid is in information updated by ssd
		 */
		if (sch->ssd_info.valid &&
		    sch->ssd_info.chpid[chp] == res_data->chp->id &&
		    (sch->ssd_info.fla[chp] & res_data->fla_mask)

	/*
	 * Do a stsch to update our subchannel structure with the
	 * new path information and eventually check for logically
	 */
	ccode = stsch(sch->schid, &sch->schib);

s390_process_res_acc_new_sch(struct subchannel_id schid)
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
		css_clear_subchannel_slow_list();
__s390_process_res_acc(struct subchannel_id schid, void *data)
	int chp_mask, old_lpm;
	struct res_acc_data *res_data;
	struct subchannel *sch;

	sch = get_subchannel_by_schid(schid);
		/* Check if a subchannel is newly available. */
		return s390_process_res_acc_new_sch(schid);

	spin_lock_irq(&sch->lock);

	chp_mask = s390_process_res_acc_sch(res_data, sch);
		spin_unlock_irq(&sch->lock);
		put_device(&sch->dev);

	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(&sch->lock);
	put_device(&sch->dev);
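/*
 * Illustrative sketch (not part of the original driver): how the logical
 * path mask above is recomputed.  A path remains usable if it is installed
 * (pim), available (pam) and operational (pom; assumed here, since that
 * line is elided above), or newly reported accessible via chp_mask, and it
 * must not be excluded by the subchannel's opm.  The helper name is made
 * up for illustration.
 */
static inline u8 example_recompute_lpm(u8 pim, u8 pam, u8 pom,
					u8 chp_mask, u8 opm)
{
	return ((pim & pam & pom) | chp_mask) & opm;
}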
s390_process_res_acc (struct res_acc_data *res_data)
	sprintf(dbf_txt, "accpr%x", res_data->chp->id);
	CIO_TRACE_EVENT( 2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT( 2, dbf_txt);

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have, the less scanning
	 * we will have to do.
	 */
	rc = for_each_subchannel(__s390_process_res_acc, res_data);
	if (css_slow_subchannels_exist())
	else if (rc != -EAGAIN)
__get_chpid_from_lir(void *data)
		/* incident-node descriptor */
		/* attached-node descriptor */
		/* incident-specific information */

		/* NULL link incident record */
	if (!(lir->indesc[0]&0xc0000000))
		/* node descriptor not valid */
	if (!(lir->indesc[0]&0x10000000))
		/* don't handle device-type nodes - FIXME */
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0]&0x000000ff);
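/*
 * Illustrative sketch (not part of the original driver): the checks above
 * operate on the first word of the incident-node descriptor.  The two
 * leading bits flag a valid node descriptor, the fourth bit distinguishes
 * the node type (clear means a device-type node, which this code does not
 * handle), and the low byte carries the chpid.  The helper name and the -1
 * error convention are made up for illustration.
 */
static inline int example_lir_chpid(u32 indesc0)
{
	if (!(indesc0 & 0xc0000000))	/* node descriptor not valid */
		return -1;
	if (!(indesc0 & 0x10000000))	/* device-type node, not handled */
		return -1;
	return indesc0 & 0x000000ff;	/* byte 3: the chpid */
}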
chsc_process_crw(void)
	struct res_acc_data res_data;
		struct chsc_header request;
		struct chsc_header response;
		u8 vf;		/* validity flags */
		u8 rs;		/* reporting source */
		u8 cc;		/* content code */
		u16 fla;	/* full link address */
		u16 rsid;	/* reporting source id */
		u32 ccdf[96];	/* content-code dependent field */
		/* ccdf has to be big enough for a link-incident record */

	/*
	 * build the chsc request block for store event information
	 * This function is only called by the machine check handler thread,
	 * so we don't need locking for the sei_page.
	 */
	CIO_TRACE_EVENT( 2, "prcss");

		memset(sei_area, 0, sizeof(*sei_area));
		memset(&res_data, 0, sizeof(struct res_acc_data));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;

		ccode = chsc(sei_area);

		switch (sei_area->response.code) {
			/* for debug purposes, check for problems */
			CIO_CRW_EVENT(4, "chsc_process_crw: event information "
				      "successfully stored\n");
			break; /* everything ok */
			      "chsc_process_crw: invalid command!\n");
			CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
			CIO_CRW_EVENT(2, "chsc_process_crw: no event "
				      "information stored\n");
			CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
				      sei_area->response.code);

		/* Check if we might have lost some information. */
		if (sei_area->flags & 0x40)
			CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
				      "has been lost due to overflow!\n");
		if (sei_area->rs != 4) {
			CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
				      "(%04X) isn't a chpid!\n",

		/* which kind of information was stored? */
		switch (sei_area->cc) {
		case 1: /* link incident */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports link incident,"
				      " reporting source is chpid %x\n",
			chpid = __get_chpid_from_lir(sei_area->ccdf);
				CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
			s390_set_chpid_offline(chpid);
		case 2: /* i/o resource accessibility */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports some I/O "
				      "devices may have become accessible\n");
			pr_debug("Data received after sei: \n");
			pr_debug("Validity flags: %x\n", sei_area->vf);

			/* allocate a new channel path structure, if needed */
			status = get_chp_status(sei_area->rsid);
				new_channel_path(sei_area->rsid);
			dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
			res_data.chp = to_channelpath(dev);
			pr_debug("chpid: %x", sei_area->rsid);
			if ((sei_area->vf & 0xc0) != 0) {
				res_data.fla = sei_area->fla;
				if ((sei_area->vf & 0xc0) == 0xc0) {
					pr_debug(" full link addr: %x",
					res_data.fla_mask = 0xffff;
					pr_debug(" link addr: %x",
					res_data.fla_mask = 0xff00;

			ret = s390_process_res_acc(&res_data);
		default: /* other stuff */
			CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
	} while (sei_area->flags & 0x80);
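/*
 * Illustrative sketch (not part of the original driver): the two high bits
 * of the validity flags decide how much of the reported link address can
 * be trusted when matching subchannels later on.  The helper name is made
 * up for illustration; the logic mirrors the vf handling above.
 */
static inline u16 example_fla_mask(u8 vf)
{
	if ((vf & 0xc0) == 0)		/* no usable link address */
		return 0;
	if ((vf & 0xc0) == 0xc0)	/* full link address valid */
		return 0xffff;
	return 0xff00;			/* only the link-address byte counts */
}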
__chp_add_new_sch(struct subchannel_id schid)
	if (stsch(schid, &schib))
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
		css_clear_subchannel_slow_list();

__chp_add(struct subchannel_id schid, void *data)
	struct channel_path *chp;
	struct subchannel *sch;

	sch = get_subchannel_by_schid(schid);
		/* Check if the subchannel is now available. */
		return __chp_add_new_sch(schid);
	spin_lock_irq(&sch->lock);
	for (i=0; i<8; i++) {
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[i] == chp->id)) {
			if (stsch(sch->schid, &sch->schib) != 0) {
				spin_unlock_irq(&sch->lock);

	spin_unlock_irq(&sch->lock);

	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &

	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(&sch->lock);
	put_device(&sch->dev);
	if (!get_chp_status(chpid))
		return 0; /* no need to do the rest */

	sprintf(dbf_txt, "cadd%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	dev = get_device(&css[0]->chps[chpid]->dev);
	rc = for_each_subchannel(__chp_add, to_channelpath(dev));
	if (css_slow_subchannels_exist())

/*
 * Handling of crw machine checks with channel path source.
 */
chp_process_crw(int chpid, int on)
		/* Path has gone. We use the link incident routine. */
		s390_set_chpid_offline(chpid);
		return 0; /* De-register is async anyway. */
	/*
	 * Path has come. Allocate a new channel path structure,
	 */
	if (get_chp_status(chpid) < 0)
		new_channel_path(chpid);
	/* Avoid the extra overhead in process_res_acc. */
	return chp_add(chpid);
static inline int check_for_io_on_path(struct subchannel *sch, int index)
	cc = stsch(sch->schid, &sch->schib);
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
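/*
 * Illustrative sketch (not part of the original driver): I/O is treated as
 * pending on path 'index' when the subchannel shows any activity and the
 * last-path-used mask points at exactly that path.  The helper name is
 * made up for illustration; the real check above works on a freshly
 * stored schib.
 */
static inline int example_io_on_path(u32 actl, u8 lpum, int index)
{
	return actl != 0 && lpum == (0x80 >> index);
}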
static void terminate_internal_io(struct subchannel *sch)
	if (cio_clear(sch)) {
		/* Recheck device in case clear failed. */
		if (device_trigger_verify(sch) != 0) {
			if (css_enqueue_subchannel_slow(sch->schid)) {
				css_clear_subchannel_slow_list();

	/* Request retry of internal operation. */
	device_set_intretry(sch);
	if (sch->driver && sch->driver->termination)
		sch->driver->termination(&sch->dev);
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
	if (!sch->ssd_info.valid)

	spin_lock_irqsave(&sch->lock, flags);
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] != chpid)

			sch->opm |= (0x80 >> chp);
			sch->lpm |= (0x80 >> chp);
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);

			sch->opm &= ~(0x80 >> chp);
			sch->lpm &= ~(0x80 >> chp);
			if (check_for_io_on_path(sch, chp)) {
				if (device_is_online(sch))
					/* Path verification is done after killing. */
					/* Kill and retry internal I/O. */
					terminate_internal_io(sch);
			} else if (!sch->lpm) {
				if (device_trigger_verify(sch) != 0) {
					if (css_enqueue_subchannel_slow(sch->schid)) {
						css_clear_subchannel_slow_list();
			} else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);

	spin_unlock_irqrestore(&sch->lock, flags);
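/*
 * Illustrative sketch (not part of the original driver): varying a chpid
 * only toggles that path's bit in the subchannel's operational and logical
 * path masks; everything else (reprobe, path verification, terminating
 * pending I/O) follows from the resulting mask state.  The helper names
 * are made up for illustration.
 */
static inline u8 example_vary_on(u8 mask, int chp)
{
	return mask | (0x80 >> chp);
}

static inline u8 example_vary_off(u8 mask, int chp)
{
	return mask & ~(0x80 >> chp);
}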
s390_subchannel_vary_chpid_off(struct device *dev, void *data)
	struct subchannel *sch;

	sch = to_subchannel(dev);
	__s390_subchannel_vary_chpid(sch, *chpid, 0);

s390_subchannel_vary_chpid_on(struct device *dev, void *data)
	struct subchannel *sch;

	sch = to_subchannel(dev);
	__s390_subchannel_vary_chpid(sch, *chpid, 1);

__s390_vary_chpid_on(struct subchannel_id schid, void *data)
	struct subchannel *sch;

	sch = get_subchannel_by_schid(schid);
		put_device(&sch->dev);

	if (stsch_err(schid, &schib))

	/* Put it on the slow path. */
	if (css_enqueue_subchannel_slow(schid)) {
		css_clear_subchannel_slow_list();
/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
s390_vary_chpid( __u8 chpid, int on)
	sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
	CIO_TRACE_EVENT( 2, dbf_text);

	status = get_chp_status(chpid);
		printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);

	if (!on && !status) {
		printk(KERN_ERR "chpid %x is already offline\n", chpid);

	set_chp_logically_online(chpid, on);

	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
			 s390_subchannel_vary_chpid_on :
			 s390_subchannel_vary_chpid_off);

		/* Scan for new devices on varied on path. */
		for_each_subchannel(__s390_vary_chpid_on, NULL);
	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
/*
 * Channel measurement related functions
 */
chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,
	struct channel_path *chp;

	chp = to_channelpath(container_of(kobj, struct device, kobj));

	size = sizeof(struct cmg_chars);
	if (off + count > size)
	memcpy(buf, chp->cmg_chars + off, count);

static struct bin_attribute chp_measurement_chars_attr = {
		.name = "measurement_chars",
		.owner = THIS_MODULE,
	.size = sizeof(struct cmg_chars),
	.read = chp_measurement_chars_read,
chp_measurement_copy_block(struct cmg_entry *buf,
			   struct channel_subsystem *css, int chpid)
	struct cmg_entry *entry, reference_buf;

		area = css->cub_addr1;
		area = css->cub_addr2;
	entry = area + (idx * sizeof(struct cmg_entry));
	do {
		memcpy(buf, entry, sizeof(*entry));
		memcpy(&reference_buf, entry, sizeof(*entry));
	} while (reference_buf.values[0] != buf->values[0]);
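/*
 * Illustrative sketch (not part of the original driver): the copy above is
 * a lock-free "read until stable" loop.  The measurement block is updated
 * concurrently by the channel subsystem, so it is copied twice and the
 * copy is only accepted once two consecutive reads agree on the first
 * word.  A generic version of the pattern, with names made up for
 * illustration:
 */
static inline void example_read_until_stable(u32 *dst, const volatile u32 *src,
					     int words)
{
	u32 check;

	do {
		int i;

		for (i = 0; i < words; i++)
			dst[i] = src[i];
		check = src[0];		/* re-read the first word */
	} while (check != dst[0]);	/* retry if it changed meanwhile */
}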
chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
	struct channel_path *chp;
	struct channel_subsystem *css;

	chp = to_channelpath(container_of(kobj, struct device, kobj));
	css = to_css(chp->dev.parent);

	size = sizeof(struct cmg_entry);

	/* Only allow single reads. */
	if (off || count < size)
	chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);

static struct bin_attribute chp_measurement_attr = {
		.name = "measurement",
		.owner = THIS_MODULE,
	.size = sizeof(struct cmg_entry),
	.read = chp_measurement_read,
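/*
 * Usage sketch (not part of the original driver): once channel measurement
 * is enabled, each channel path exposes the two binary attributes above in
 * sysfs and they can be read like ordinary files, e.g. (paths illustrative):
 *
 *	cat /sys/devices/css0/chp0.4a/measurement_chars > chars.bin
 *	cat /sys/devices/css0/chp0.4a/measurement > block.bin
 *
 * "measurement" only allows a single full-sized read from offset 0, as
 * enforced in chp_measurement_read() above.
 */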
chsc_remove_chp_cmg_attr(struct channel_path *chp)
	sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr);
	sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr);

chsc_add_chp_cmg_attr(struct channel_path *chp)
	ret = sysfs_create_bin_file(&chp->dev.kobj,
				    &chp_measurement_chars_attr);
	ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr);
		sysfs_remove_bin_file(&chp->dev.kobj,
				      &chp_measurement_chars_attr);

chsc_remove_cmg_attr(struct channel_subsystem *css)
	for (i = 0; i <= __MAX_CHPID; i++) {
		chsc_remove_chp_cmg_attr(css->chps[i]);

chsc_add_cmg_attr(struct channel_subsystem *css)
	for (i = 0; i <= __MAX_CHPID; i++) {
		ret = chsc_add_chp_cmg_attr(css->chps[i]);

	for (--i; i >= 0; i--) {
		chsc_remove_chp_cmg_attr(css->chps[i]);
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
		struct chsc_header request;
		u32 operation_code : 2;
		struct chsc_header response;

	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0001: /* Success. */
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide secm\n");
	case 0x0102: /* cub addresses incorrect */
		CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
	case 0x0103: /* key error */
		CIO_CRW_EVENT(2, "Access key error in secm\n");
	case 0x0105: /* error while starting */
		CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      secm_area->response.code);
chsc_secm(struct channel_subsystem *css, int enable)
	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);

	mutex_lock(&css->mutex);
	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			mutex_unlock(&css->mutex);

	ret = __chsc_do_secm(css, enable, secm_area);
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			chsc_remove_cmg_attr(css);
	if (enable && !css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	mutex_unlock(&css->mutex);
	free_page((unsigned long)secm_area);
/*
 * Files for the channel path entries.
 */
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
		sprintf(buf, "offline\n"));

chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
	struct channel_path *cp = container_of(dev, struct channel_path, dev);

	num_args = sscanf(buf, "%5s", cmd);

	if (!strnicmp(cmd, "on", 2))
		error = s390_vary_chpid(cp->id, 1);
	else if (!strnicmp(cmd, "off", 3))
		error = s390_vary_chpid(cp->id, 0);

	return error < 0 ? error : count;

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
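/*
 * Usage sketch (not part of the original driver): the status attribute
 * created above lets a channel path be varied from user space, e.g.
 * (chpid and path illustrative):
 *
 *	echo off > /sys/devices/css0/chp0.4a/status
 *	echo on  > /sys/devices/css0/chp0.4a/status
 *	cat /sys/devices/css0/chp0.4a/status
 */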
chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	return sprintf(buf, "%x\n", chp->desc.desc);

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct channel_path *chp = to_channelpath(dev);

	if (chp->cmg == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->cmg);

static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);

chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct channel_path *chp = to_channelpath(dev);

	if (chp->shared == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->shared);

static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);

static struct attribute * chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_type.attr,
	&dev_attr_shared.attr,

static struct attribute_group chp_attr_group = {

chp_release(struct device *dev)
	struct channel_path *cp;

	cp = container_of(dev, struct channel_path, dev);
chsc_determine_channel_path_description(int chpid,
					struct channel_path_desc *desc)
		struct chsc_header request;
		u32 first_chpid : 8;
		struct chsc_header response;
		struct channel_path_desc desc;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->first_chpid = chpid;
	scpd_area->last_chpid = chpid;

	ccode = chsc(scpd_area);
		ret = (ccode == 3) ? -ENODEV : -EBUSY;

	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);

	free_page((unsigned long)scpd_area);
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
	chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
	if (chp->cmg_chars) {
		struct cmg_chars *cmg_chars;

		cmg_chars = chp->cmg_chars;
		for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
			mask = 0x80 >> (i + 3);
				cmg_chars->values[i] = chars->values[i];
				cmg_chars->values[i] = 0;

		/* No cmg-dependent data. */
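/*
 * Illustrative sketch (not part of the original driver): the cmcv byte is a
 * bit vector in which bit i+3 (counting from the most significant bit) says
 * whether measurement characteristic i is provided; characteristics whose
 * bit is clear are zeroed above.  The helper name is made up for
 * illustration.
 */
static inline int example_cmg_char_valid(u8 cmcv, int i)
{
	return (cmcv & (0x80 >> (i + 3))) != 0;
}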
chsc_get_channel_measurement_chars(struct channel_path *chp)
		struct chsc_header request;
		u32 first_chpid : 8;
		struct chsc_header response;
		u32 data[NR_MEASUREMENT_CHARS];

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	scmc_area->first_chpid = chp->id;
	scmc_area->last_chpid = chp->id;

	ccode = chsc(scmc_area);
		ret = (ccode == 3) ? -ENODEV : -EBUSY;

	switch (scmc_area->response.code) {
	case 0x0001: /* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Invalid bit combination. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
	case 0x0004: /* Command not provided. */
		CIO_CRW_EVENT(2, "Model does not provide scmc\n");
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scmc_area->response.code);

	free_page((unsigned long)scmc_area);
/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
new_channel_path(int chpid)
	struct channel_path *chp;

	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);

	/* fill in status, etc. */
	chp->dev.parent = &css[0]->device;
	chp->dev.release = chp_release;
	snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);

	/* Obtain channel path description and fill it in. */
	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
	/* Get channel-measurement characteristics. */
	if (css_characteristics_avail && css_chsc_characteristics.scmc
	    && css_chsc_characteristics.secm) {
		ret = chsc_get_channel_measurement_chars(chp);
		static int msg_done;

			printk(KERN_WARNING "cio: Channel measurements not "
			       "available, continuing.\n");

	/* make it known to the system */
	ret = device_register(&chp->dev);
		printk(KERN_WARNING "%s: could not register %02x\n",
	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
		device_unregister(&chp->dev);
	mutex_lock(&css[0]->mutex);
	if (css[0]->cm_enabled) {
		ret = chsc_add_chp_cmg_attr(chp);
			sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
			device_unregister(&chp->dev);
			mutex_unlock(&css[0]->mutex);
	css[0]->chps[chpid] = chp;
	mutex_unlock(&css[0]->mutex);

chsc_get_chp_desc(struct subchannel *sch, int chp_no)
	struct channel_path *chp;
	struct channel_path_desc *desc;

	chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
chsc_alloc_sei_area(void)
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		printk(KERN_WARNING "Can't allocate page for processing of "
		       "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);

chsc_enable_facility(int operation_code)
		struct chsc_header request;
		u32 operation_data_area[252];
		struct chsc_header response;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);

	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
		ret = (ret == 3) ? -ENODEV : -EBUSY;

	switch (sda_area->response.code) {
	case 0x0001: /* everything ok */
	case 0x0003: /* invalid request block */
	case 0x0004: /* command not provided */
	case 0x0101: /* facility not provided */
	default: /* something went wrong */
	free_page((unsigned long)sda_area);
subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

chsc_determine_css_characteristics(void)
		struct chsc_header request;
		struct chsc_header response;
		u32 general_char[510];

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		printk(KERN_WARNING "cio: Was not able to determine available "
		       "CHSCs due to no memory.\n");

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs, cc=%i.\n", result);

	if (scsc_area->response.code != 1) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs.\n");

	memcpy(&css_general_characteristics, scsc_area->general_char,
	       sizeof(css_general_characteristics));
	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
	       sizeof(css_chsc_characteristics));

	free_page ((unsigned long) scsc_area);

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);