/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *                            IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"
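/*
 * Page used as the command area for the store-event-information (SEI)
 * CHSC command; allocated once at init time by chsc_alloc_sei_area().
 */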
static void *sei_page;
/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *        process more than one at a time? */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
        int ccode, j;
        struct {
                struct chsc_header request;
                u16 reserved1a : 10;
                u16 ssid : 2;
                u16 reserved1b : 4;
                u16 f_sch;        /* first subchannel */
                u16 reserved2;
                u16 l_sch;        /* last subchannel */
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u8 sch_valid : 1;
                u8 dev_valid : 1;
                u8 st : 3;        /* subchannel type */
                u8 zeroes : 3;
                u8 unit_addr;     /* unit address */
                u16 devno;        /* device number */
                u8 path_mask;
                u8 fla_valid_mask;
                u16 sch;          /* subchannel */
                u8 chpid[8];      /* chpids 0-7 */
                u16 fla[8];       /* full link addresses 0-7 */
        } __attribute__ ((packed)) *ssd_area;

        ssd_area = page;
        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;

        ssd_area->ssid = sch->schid.ssid;
        ssd_area->f_sch = sch->schid.sch_no;
        ssd_area->l_sch = sch->schid.sch_no;

        ccode = chsc(ssd_area);
        if (ccode > 0) {
                pr_debug("chsc returned with ccode = %d\n", ccode);
                return (ccode == 3) ? -ENODEV : -EBUSY;
        }

        switch (ssd_area->response.code) {
        case 0x0001: /* everything ok */
                break;
        case 0x0002:
                CIO_CRW_EVENT(2, "Invalid command!\n");
                return -EINVAL;
        case 0x0003:
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                return -EINVAL;
        case 0x0004:
                CIO_CRW_EVENT(2, "Model does not provide ssd\n");
                return -EOPNOTSUPP;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              ssd_area->response.code);
                return -EIO;
        }
        /*
         * ssd_area->st stores the type of the detected
         * subchannel, with the following definitions:
         *
         * 0: I/O subchannel:     all fields have meaning
         * 1: CHSC subchannel:    only sch_val, st and sch have meaning
         * 2: Message subchannel: all fields except unit_addr have meaning
         * 3: ADM subchannel:     only sch_val, st and sch have meaning
         *
         * Other types are currently undefined.
         */
        if (ssd_area->st > 3) { /* uhm, that looks strange... */
                CIO_CRW_EVENT(0, "Strange subchannel type %d"
                              " for sch 0.%x.%04x\n", ssd_area->st,
                              sch->schid.ssid, sch->schid.sch_no);
                /*
                 * There may have been a new subchannel type defined in the
                 * time since this code was written; since we don't know which
                 * fields have meaning and what to do with it we just jump out
                 */
                return 0;
        } else {
                const char *type[4] = {"I/O", "chsc", "message", "ADM"};
                CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
                              sch->schid.ssid, sch->schid.sch_no,
                              type[ssd_area->st]);

                sch->ssd_info.valid = 1;
                sch->ssd_info.type = ssd_area->st;
        }
        if (ssd_area->st == 0 || ssd_area->st == 2) {
                for (j = 0; j < 8; j++) {
                        if (!((0x80 >> j) & ssd_area->path_mask &
                              ssd_area->fla_valid_mask))
                                continue;
                        sch->ssd_info.chpid[j] = ssd_area->chpid[j];
                        sch->ssd_info.fla[j] = ssd_area->fla[j];
                }
        }
        return 0;
}
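/*
 * Read the subchannel description for @sch via CHSC and register any
 * channel paths named there that are not yet known.  The command area
 * must be a single page of 31-bit addressable storage, hence the
 * temporary GFP_DMA page.
 */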
int
css_get_ssd_info(struct subchannel *sch)
{
        int ret;
        void *page;

        page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page)
                return -ENOMEM;
        spin_lock_irq(sch->lock);
        ret = chsc_get_sch_desc_irq(sch, page);
        if (ret) {
                static int cio_chsc_err_msg;

                if (!cio_chsc_err_msg) {
                        printk(KERN_ERR
                               "chsc_get_sch_descriptions:"
                               " Error %d while doing chsc; "
                               "processing some machine checks may "
                               "not work\n", ret);
                        cio_chsc_err_msg = 1;
                }
        }
        spin_unlock_irq(sch->lock);
        free_page((unsigned long)page);
        if (!ret) {
                int j, mask;
                struct chp_id chpid;

                chp_id_init(&chpid);
                /* Allocate channel path structures, if needed. */
                for (j = 0; j < 8; j++) {
                        mask = 0x80 >> j;
                        chpid.id = sch->ssd_info.chpid[j];
                        if ((sch->schib.pmcw.pim & mask) &&
                            !chp_is_registered(chpid))
                                chp_new(chpid);
                }
        }
        return ret;
}
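/*
 * bus_for_each_dev() callback used by chsc_chp_offline(): remove the
 * given channel path from a subchannel and terminate or re-verify any
 * I/O that was still using it.
 */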
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
        int j;
        int mask;
        struct subchannel *sch;
        struct chp_id *chpid;
        struct schib schib;

        sch = to_subchannel(dev);
        chpid = data;
        for (j = 0; j < 8; j++) {
                mask = 0x80 >> j;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[j] == chpid->id))
                        break;
        }
        if (j >= 8)
                return 0;

        spin_lock_irq(sch->lock);

        stsch(sch->schid, &schib);
        if (!schib.pmcw.dnv)
                goto out_unreg;
        memcpy(&sch->schib, &schib, sizeof(struct schib));
        /* Check for single path devices. */
        if (sch->schib.pmcw.pim == 0x80)
                goto out_unreg;

        if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
            (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
            (sch->schib.pmcw.lpum == mask)) {
                int cc;

                cc = cio_clear(sch);
                if (cc == -ENODEV)
                        goto out_unreg;
                /* Request retry of internal operation. */
                device_set_intretry(sch);
                /* Call handler. */
                if (sch->driver && sch->driver->termination)
                        sch->driver->termination(&sch->dev);
                goto out_unlock;
        }

        /* trigger path verification. */
        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);
        else if (sch->lpm == mask)
                goto out_unreg;
out_unlock:
        spin_unlock_irq(sch->lock);
        return 0;
out_unreg:
        spin_unlock_irq(sch->lock);
        sch->lpm = 0;
        if (css_enqueue_subchannel_slow(sch->schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
        }
        return 0;
}
void chsc_chp_offline(struct chp_id chpid)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) <= 0)
                return;
        bus_for_each_dev(&css_bus_type, NULL, &chpid,
                         s390_subchannel_remove_chpid);

        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
}
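/*
 * Describes a resource-accessibility event: the channel path that has
 * become accessible and, if reported, the link address it applies to.
 * fla_mask selects how much of fla is valid (0xffff for a full link
 * address, 0xff00 for a link address).
 */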
struct res_acc_data {
        struct chp_id chpid;
        u32 fla_mask;
        u16 fla;
};
static int s390_process_res_acc_sch(struct res_acc_data *res_data,
                                    struct subchannel *sch)
{
        int found;
        int chp;
        int ccode;

        found = 0;
        for (chp = 0; chp <= 7; chp++)
                /*
                 * check if chpid is in information updated by ssd
                 */
                if (sch->ssd_info.valid &&
                    sch->ssd_info.chpid[chp] == res_data->chpid.id &&
                    (sch->ssd_info.fla[chp] & res_data->fla_mask)
                    == res_data->fla) {
                        found = 1;
                        break;
                }

        if (found == 0)
                return 0;

        /*
         * Do a stsch to update our subchannel structure with the
         * new path information and eventually check for logically
         * offline chpids.
         */
        ccode = stsch(sch->schid, &sch->schib);
        if (ccode > 0)
                return 0;

        return 0x80 >> chp;
}
static int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
         * to do recognition again.
         * Since we don't have any idea about which chpid
         * that beast may be on we'll have to do a stsch
         * on all devices, grr...
         */
        if (stsch_err(schid, &schib))
                /* We're through. */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}
static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
        int chp_mask, old_lpm;
        struct res_acc_data *res_data;
        struct subchannel *sch;

        res_data = data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if a subchannel is newly available. */
                return s390_process_res_acc_new_sch(schid);

        spin_lock_irq(sch->lock);

        chp_mask = s390_process_res_acc_sch(res_data, sch);

        if (chp_mask == 0) {
                spin_unlock_irq(sch->lock);
                put_device(&sch->dev);
                return 0;
        }
        old_lpm = sch->lpm;
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | chp_mask) & sch->opm;
        if (!old_lpm && sch->lpm)
                device_trigger_reprobe(sch);
        else if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(sch->lock);
        put_device(&sch->dev);
        return 0;
}
static int
s390_process_res_acc(struct res_acc_data *res_data)
{
        int rc;
        char dbf_txt[15];

        sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
                res_data->chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
        if (res_data->fla != 0) {
                sprintf(dbf_txt, "fla%x", res_data->fla);
                CIO_TRACE_EVENT(2, dbf_txt);
        }

        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have (info), the less scanning
         * we will have to do.
         */
        rc = for_each_subchannel(__s390_process_res_acc, res_data);
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        else if (rc != -EAGAIN)
                rc = 0;
        return rc;
}
static int
__get_chpid_from_lir(void *data)
{
        struct lir {
                u8  iq;
                u8  ic;
                u16 sci;
                /* incident-node descriptor */
                u32 indesc[28];
                /* attached-node descriptor */
                u32 andesc[28];
                /* incident-specific information */
                u32 isinfo[28];
        } __attribute__ ((packed)) *lir;

        lir = data;
        if (!(lir->iq&0x80))
                /* NULL link incident record */
                return -EINVAL;
        if (!(lir->indesc[0]&0xc0000000))
                /* node descriptor not valid */
                return -EINVAL;
        if (!(lir->indesc[0]&0x10000000))
                /* don't handle device-type nodes - FIXME */
                return -EINVAL;
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

        return (u16) (lir->indesc[0]&0x000000ff);
}
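/*
 * Command area layout for store event information (SEI), CHSC command
 * code 0x000e.  The interpretation of the content-code dependent field
 * (ccdf) depends on the content code (cc) of the stored event.
 */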
struct chsc_sei_area {
        struct chsc_header request;
        u32 reserved1;
        u32 reserved2;
        u32 reserved3;
        struct chsc_header response;
        u32 reserved4;
        u8  flags;
        u8  vf;         /* validity flags */
        u8  rs;         /* reporting source */
        u8  cc;         /* content code */
        u16 fla;        /* full link address */
        u16 rsid;       /* reporting source id */
        u32 reserved5;
        u32 reserved6;
        u8 ccdf[4096 - 16 - 24];        /* content-code dependent field */
        /* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
        struct chp_id chpid;
        int id;

        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                      sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return 0;
        id = __get_chpid_from_lir(sei_area->ccdf);
        if (id < 0)
                CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
        else {
                chp_id_init(&chpid);
                chpid.id = id;
                chsc_chp_offline(chpid);
        }
        return 0;
}
static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
        struct res_acc_data res_data;
        struct chp_id chpid;
        int status;
        int rc;

        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return 0;
        chp_id_init(&chpid);
        chpid.id = sei_area->rsid;
        /* allocate a new channel path structure, if needed */
        status = chp_get_status(chpid);
        if (status < 0)
                chp_new(chpid);
        else if (!status)
                return 0;
        memset(&res_data, 0, sizeof(struct res_acc_data));
        res_data.chpid = chpid;
        if ((sei_area->vf & 0xc0) != 0) {
                res_data.fla = sei_area->fla;
                if ((sei_area->vf & 0xc0) == 0xc0)
                        /* full link address */
                        res_data.fla_mask = 0xffff;
                else
                        /* link address */
                        res_data.fla_mask = 0xff00;
        }
        rc = s390_process_res_acc(&res_data);

        return rc;
}
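/*
 * Content-code dependent field of a channel-path-configuration
 * notification (content code 8): a bitmap of affected CHPIDs plus the
 * requested operation (0 = configure, 1 = deconfigure, 2 = cancel
 * deconfigure).
 */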
struct chp_config_data {
        u8 map[32];
        u8 op;
        u8 pc;
};
static int chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
        struct chp_config_data *data;
        struct chp_id chpid;
        int num;

        CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
        if (sei_area->rs != 0)
                return 0;
        data = (struct chp_config_data *) &(sei_area->ccdf);
        chp_id_init(&chpid);
        for (num = 0; num <= __MAX_CHPID; num++) {
                if (!chp_test_bit(data->map, num))
                        continue;
                chpid.id = num;
                printk(KERN_WARNING "cio: processing configure event %d for "
                       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
                switch (data->op) {
                case 0:
                        chp_cfg_schedule(chpid, 1);
                        break;
                case 1:
                        chp_cfg_schedule(chpid, 0);
                        break;
                case 2:
                        chp_cfg_cancel_deconfigure(chpid);
                        break;
                }
        }
        return 0;
}
static int chsc_process_sei(struct chsc_sei_area *sei_area)
{
        int rc;

        /* Check if we might have lost some information. */
        if (sei_area->flags & 0x40)
                CIO_CRW_EVENT(2, "chsc: event overflow\n");
        /* which kind of information was stored? */
        rc = 0;
        switch (sei_area->cc) {
        case 1: /* link incident */
                rc = chsc_process_sei_link_incident(sei_area);
                break;
        case 2: /* i/o resource accessibility */
                rc = chsc_process_sei_res_acc(sei_area);
                break;
        case 8: /* channel-path-configuration notification */
                rc = chsc_process_sei_chp_config(sei_area);
                break;
        default: /* other stuff */
                CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
                              sei_area->cc);
                break;
        }

        return rc;
}
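/*
 * Entry point from the machine check handler: a channel report word
 * announced pending event information.  Issue SEI repeatedly until the
 * flags field (bit 0x80) no longer indicates further pending events.
 */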
int chsc_process_crw(void)
{
        struct chsc_sei_area *sei_area;
        int ret;
        int rc;

        if (!sei_page)
                return 0;
        /* Access to sei_page is serialized through machine check handler
         * thread, so no need for locking. */
        sei_area = sei_page;

        CIO_TRACE_EVENT(2, "prcss");
        ret = 0;
        do {
                memset(sei_area, 0, sizeof(*sei_area));
                sei_area->request.length = 0x0010;
                sei_area->request.code = 0x000e;
                if (chsc(sei_area))
                        break;

                if (sei_area->response.code == 0x0001) {
                        CIO_CRW_EVENT(4, "chsc: sei successful\n");
                        rc = chsc_process_sei(sei_area);
                        if (rc)
                                ret = rc;
                } else {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                      sei_area->response.code);
                        ret = 0;
                        break;
                }
        } while (sei_area->flags & 0x80);
        return ret;
}
static int
__chp_add_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;

        if (stsch_err(schid, &schib))
                /* We're through. */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}
static int
__chp_add(struct subchannel_id schid, void *data)
{
        int i, mask;
        struct chp_id *chpid;
        struct subchannel *sch;

        chpid = data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if the subchannel is now available. */
                return __chp_add_new_sch(schid);
        spin_lock_irq(sch->lock);
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[i] == chpid->id)) {
                        if (stsch(sch->schid, &sch->schib) != 0) {
                                /* Endgame. */
                                spin_unlock_irq(sch->lock);
                                return -ENXIO;
                        }
                        break;
                }
        }
        if (i == 8) {
                spin_unlock_irq(sch->lock);
                return 0;
        }
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | mask) & sch->opm;

        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(sch->lock);
        put_device(&sch->dev);
        return 0;
}
int chsc_chp_online(struct chp_id chpid)
{
        int rc;
        char dbf_txt[15];

        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) == 0)
                return 0;
        rc = for_each_subchannel(__chp_add, &chpid);
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        if (rc != -EAGAIN)
                rc = 0;
        return rc;
}
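/*
 * Return 1 if the subchannel has I/O active on the path with the given
 * mask index, 0 otherwise (also if the subchannel has vanished).
 */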
static int check_for_io_on_path(struct subchannel *sch, int index)
{
        int cc;

        cc = stsch(sch->schid, &sch->schib);
        if (cc)
                return 0;
        if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
                return 1;
        return 0;
}
static void terminate_internal_io(struct subchannel *sch)
{
        if (cio_clear(sch)) {
                /* Recheck device in case clear failed. */
                sch->lpm = 0;
                if (device_trigger_verify(sch) != 0) {
                        if (css_enqueue_subchannel_slow(sch->schid)) {
                                css_clear_subchannel_slow_list();
                                need_rescan = 1;
                        }
                }
                return;
        }
        /* Request retry of internal operation. */
        device_set_intretry(sch);
        /* Call handler. */
        if (sch->driver && sch->driver->termination)
                sch->driver->termination(&sch->dev);
}
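/*
 * Apply a vary operation for @chpid to a single subchannel: adjust the
 * opm/lpm path masks and, when varying off, deal with I/O that is
 * still running on the path about to be removed.
 */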
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                                         struct chp_id chpid, int on)
{
        int chp, old_lpm;
        unsigned long flags;

        if (!sch->ssd_info.valid)
                return;

        spin_lock_irqsave(sch->lock, flags);
        old_lpm = sch->lpm;
        for (chp = 0; chp < 8; chp++) {
                if (sch->ssd_info.chpid[chp] != chpid.id)
                        continue;

                if (on) {
                        sch->opm |= (0x80 >> chp);
                        sch->lpm |= (0x80 >> chp);
                        if (!old_lpm)
                                device_trigger_reprobe(sch);
                        else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);
                        break;
                }
                sch->opm &= ~(0x80 >> chp);
                sch->lpm &= ~(0x80 >> chp);
                if (check_for_io_on_path(sch, chp)) {
                        if (device_is_online(sch))
                                /* Path verification is done after killing. */
                                device_kill_io(sch);
                        else
                                /* Kill and retry internal I/O. */
                                terminate_internal_io(sch);
                } else if (!sch->lpm) {
                        if (device_trigger_verify(sch) != 0) {
                                if (css_enqueue_subchannel_slow(sch->schid)) {
                                        css_clear_subchannel_slow_list();
                                        need_rescan = 1;
                                }
                        }
                } else if (sch->driver && sch->driver->verify)
                        sch->driver->verify(&sch->dev);
                break;
        }
        spin_unlock_irqrestore(sch->lock, flags);
}
static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct chp_id *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 0);
        return 0;
}

static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct chp_id *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
        return 0;
}
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
        struct schib schib;
        struct subchannel *sch;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                put_device(&sch->dev);
                return 0;
        }
        if (stsch_err(schid, &schib))
                /* We're through. */
                return -ENXIO;
        /* Put it on the slow path. */
        if (css_enqueue_subchannel_slow(schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
        /*
         * Redo PathVerification on the devices the chpid connects to
         */
        bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
                         s390_subchannel_vary_chpid_on :
                         s390_subchannel_vary_chpid_off);
        if (on)
                /* Scan for new devices on varied on path. */
                for_each_subchannel(__s390_vary_chpid_on, NULL);
        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
        return 0;
}
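/*
 * Channel measurement helpers: add or remove the measurement-related
 * attributes on all channel paths of a channel subsystem.
 * chsc_add_cmg_attr() rolls back attributes added so far if one of the
 * additions fails.
 */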
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
        int i;

        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
        int i, ret;

        ret = 0;
        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                ret = chp_add_cmg_attr(css->chps[i]);
                if (ret)
                        goto cleanup;
        }
        return ret;
cleanup:
        for (--i; i >= 0; i--) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
        return ret;
}
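/*
 * Issue the set-channel-monitor (secm) command, CHSC command code
 * 0x0016, to switch the channel measurement facility on (operation
 * code 0) or off (operation code 1).  The two preallocated cub pages
 * are passed to the machine for its measurement updates.
 */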
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
        struct {
                struct chsc_header request;
                u32 operation_code : 2;
                u32 : 30;
                u32 key : 4;
                u32 : 28;
                u32 zeroes1;
                u32 cub_addr1;
                u32 zeroes2;
                u32 cub_addr2;
                u32 reserved[13];
                struct chsc_header response;
                u32 status : 8;
                u32 : 4;
                u32 fmt : 4;
                u32 : 16;
        } __attribute__ ((packed)) *secm_area;
        int ret, ccode;

        secm_area = page;
        secm_area->request.length = 0x0050;
        secm_area->request.code = 0x0016;

        secm_area->key = PAGE_DEFAULT_KEY;
        secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
        secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

        secm_area->operation_code = enable ? 0 : 1;

        ccode = chsc(secm_area);
        if (ccode > 0)
                return (ccode == 3) ? -ENODEV : -EBUSY;

        switch (secm_area->response.code) {
        case 0x0001: /* Success. */
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide secm\n");
                ret = -EOPNOTSUPP;
                break;
        case 0x0102: /* cub addresses incorrect */
                CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
                ret = -EINVAL;
                break;
        case 0x0103: /* key error */
                CIO_CRW_EVENT(2, "Access key error in secm\n");
                ret = -EINVAL;
                break;
        case 0x0105: /* error while starting */
                CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
                ret = -EIO;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              secm_area->response.code);
                ret = -EIO;
        }
        return ret;
}
int
chsc_secm(struct channel_subsystem *css, int enable)
{
        void *secm_area;
        int ret;

        secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!secm_area)
                return -ENOMEM;

        mutex_lock(&css->mutex);
        if (enable && !css->cm_enabled) {
                css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!css->cub_addr1 || !css->cub_addr2) {
                        free_page((unsigned long)css->cub_addr1);
                        free_page((unsigned long)css->cub_addr2);
                        free_page((unsigned long)secm_area);
                        mutex_unlock(&css->mutex);
                        return -ENOMEM;
                }
        }
        ret = __chsc_do_secm(css, enable, secm_area);
        if (!ret) {
                css->cm_enabled = enable;
                if (css->cm_enabled) {
                        ret = chsc_add_cmg_attr(css);
                        if (ret) {
                                memset(secm_area, 0, PAGE_SIZE);
                                __chsc_do_secm(css, 0, secm_area);
                                css->cm_enabled = 0;
                        }
                } else
                        chsc_remove_cmg_attr(css);
        }
        if (enable && !css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);
        }
        mutex_unlock(&css->mutex);
        free_page((unsigned long)secm_area);
        return ret;
}
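/*
 * Obtain the description of a single channel path via the store
 * channel-path description (scpd) command, CHSC command code 0x0002,
 * and copy it to @desc.
 */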
int chsc_determine_channel_path_description(struct chp_id chpid,
                                            struct channel_path_desc *desc)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                struct channel_path_desc desc;
        } __attribute__ ((packed)) *scpd_area;

        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scpd_area)
                return -ENOMEM;

        scpd_area->request.length = 0x0010;
        scpd_area->request.code = 0x0002;

        scpd_area->first_chpid = chpid.id;
        scpd_area->last_chpid = chpid.id;

        ccode = chsc(scpd_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (scpd_area->response.code) {
        case 0x0001: /* Success. */
                memcpy(desc, &scpd_area->desc,
                       sizeof(struct channel_path_desc));
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide scpd\n");
                ret = -EOPNOTSUPP;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scpd_area->response.code);
                ret = -EIO;
        }
out:
        free_page((unsigned long)scpd_area);
        return ret;
}
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                          struct cmg_chars *chars)
{
        int i, mask;

        chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
                                 GFP_KERNEL);
        if (chp->cmg_chars) {
                struct cmg_chars *cmg_chars;

                cmg_chars = chp->cmg_chars;
                for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                        mask = 0x80 >> (i + 3);
                        if (cmcv & mask)
                                cmg_chars->values[i] = chars->values[i];
                        else
                                cmg_chars->values[i] = 0;
                }
        } else {
                /* No cmg-dependent data. */
                chp->cmg = -1;
        }
}
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                u32 not_valid : 1;
                u32 shared : 1;
                u32 : 22;
                u32 chpid : 8;
                u32 cmcv : 5;
                u32 : 11;
                u32 cmgq : 8;
                u32 cmg : 8;
                u32 zeroes3;
                u32 data[NR_MEASUREMENT_CHARS];
        } __attribute__ ((packed)) *scmc_area;

        scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scmc_area)
                return -ENOMEM;

        scmc_area->request.length = 0x0010;
        scmc_area->request.code = 0x0022;

        scmc_area->first_chpid = chp->chpid.id;
        scmc_area->last_chpid = chp->chpid.id;

        ccode = chsc(scmc_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (scmc_area->response.code) {
        case 0x0001: /* Success. */
                if (!scmc_area->not_valid) {
                        chp->cmg = scmc_area->cmg;
                        chp->shared = scmc_area->shared;
                        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                                  (struct cmg_chars *)
                                                  &scmc_area->data);
                } else {
                        chp->cmg = -1;
                        chp->shared = -1;
                }
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Invalid bit combination. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided. */
                CIO_CRW_EVENT(2, "Model does not provide scmc\n");
                ret = -EOPNOTSUPP;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scmc_area->response.code);
                ret = -EIO;
        }
out:
        free_page((unsigned long)scmc_area);
        return ret;
}
static int __init
chsc_alloc_sei_area(void)
{
        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sei_page)
                printk(KERN_WARNING "Can't allocate page for processing of "
                       "chsc machine checks!\n");
        return (sei_page ? 0 : -ENOMEM);
}
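/*
 * Enable an optional facility through CHSC command code 0x0031.  Which
 * facility is enabled is selected by @operation_code; for example, the
 * css code uses this to activate multiple subchannel sets.
 */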
int __init
chsc_enable_facility(int operation_code)
{
        int ret;
        struct {
                struct chsc_header request;
                u8 reserved1 : 4;
                u8 format : 4;
                u8 reserved2;
                u16 operation_code;
                u32 reserved3;
                u32 reserved4;
                u32 operation_data_area[252];
                struct chsc_header response;
                u32 reserved5 : 4;
                u32 format2 : 4;
                u32 reserved6 : 24;
        } __attribute__ ((packed)) *sda_area;

        sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sda_area)
                return -ENOMEM;
        sda_area->request.length = 0x0400;
        sda_area->request.code = 0x0031;
        sda_area->operation_code = operation_code;

        ret = chsc(sda_area);
        if (ret > 0) {
                ret = (ret == 3) ? -ENODEV : -EBUSY;
                goto out;
        }
        switch (sda_area->response.code) {
        case 0x0001: /* everything ok */
                ret = 0;
                break;
        case 0x0003: /* invalid request block */
        case 0x0007:
                ret = -EINVAL;
                break;
        case 0x0004: /* command not provided */
        case 0x0101: /* facility not provided */
                ret = -EOPNOTSUPP;
                break;
        default: /* something went wrong */
                ret = -EIO;
        }
out:
        free_page((unsigned long)sda_area);
        return ret;
}
subsys_initcall(chsc_alloc_sei_area);
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
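/*
 * Determine the general and CHSC characteristics of the channel
 * subsystem (store channel-subsystem characteristics, CHSC command
 * code 0x0010) and cache them in the structures above for use by the
 * rest of the common I/O layer.
 */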
int __init
chsc_determine_css_characteristics(void)
{
        int result;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u32 general_char[510];
                u32 chsc_char[518];
        } __attribute__ ((packed)) *scsc_area;

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scsc_area) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs due to no memory.\n");
                return -ENOMEM;
        }

        scsc_area->request.length = 0x0010;
        scsc_area->request.code = 0x0010;

        result = chsc(scsc_area);
        if (result) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs, cc=%i.\n", result);
                result = -EIO;
                goto exit;
        }

        if (scsc_area->response.code != 1) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs.\n");
                result = -EIO;
                goto exit;
        }

        memcpy(&css_general_characteristics, scsc_area->general_char,
               sizeof(css_general_characteristics));
        memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
               sizeof(css_chsc_characteristics));
exit:
        free_page((unsigned long)scsc_area);
        return result;
}
EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);