/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/init.h>
15 #include <linux/device.h>
18 #include <asm/chpid.h>
22 #include "cio_debug.h"
/* Page for store-event-information (CHSC) data; allocated once at init by
 * chsc_alloc_sei_area() and consumed by chsc_process_crw().  Access is
 * serialized through the machine-check handler thread (see comment there). */
27 static void *sei_page;
/*
 * Request/response block for the CHSC store-subchannel-description command
 * (code 0x0004), used by chsc_get_ssd_info().
 * NOTE(review): fields referenced by chsc_get_ssd_info() (ssid, sch_valid,
 * path_mask, fla_valid_mask, reserved words) are missing from this
 * extraction -- confirm layout against upstream chsc.c.
 */
29 struct chsc_ssd_area {
30 struct chsc_header request;
34 u16 f_sch; /* first subchannel */
36 u16 l_sch; /* last subchannel */
38 struct chsc_header response;
42 u8 st : 3; /* subchannel type */
44 u8 unit_addr; /* unit address */
45 u16 devno; /* device number */
48 u16 sch; /* subchannel */
49 u8 chpid[8]; /* chpids 0-7 */
50 u16 fla[8]; /* full link addresses 0-7 */
51 } __attribute__ ((packed));
/*
 * chsc_get_ssd_info - obtain a subchannel description via CHSC (cmd 0x0004)
 * @schid: subchannel to query (first == last subchannel in the request)
 * @ssd: output; filled with path mask, fla-valid mask, chpids and full
 *       link addresses for the valid paths
 *
 * Runs the CHSC on a zeroed GFP_DMA page.  Condition code 3 maps to
 * -ENODEV, any other non-zero cc to -EBUSY; a response code other than
 * 0x0001 is logged.  Only IO and MSG subchannel types are copied out.
 * NOTE(review): local declarations (page, ccode, i, mask), early returns
 * and closing braces are missing from this extraction.
 */
53 int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
56 struct chsc_ssd_area *ssd_area;
62 page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
65 ssd_area = (struct chsc_ssd_area *) page;
66 ssd_area->request.length = 0x0010;
67 ssd_area->request.code = 0x0004;
68 ssd_area->ssid = schid.ssid;
/* Query exactly one subchannel: first == last. */
69 ssd_area->f_sch = schid.sch_no;
70 ssd_area->l_sch = schid.sch_no;
72 ccode = chsc(ssd_area);
75 ret = (ccode == 3) ? -ENODEV : -EBUSY;
78 if (ssd_area->response.code != 0x0001) {
79 CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
80 schid.ssid, schid.sch_no,
81 ssd_area->response.code);
85 if (!ssd_area->sch_valid) {
91 memset(ssd, 0, sizeof(struct chsc_ssd_info));
/* Only I/O and message subchannels carry path information we use. */
92 if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
93 (ssd_area->st != SUBCHANNEL_TYPE_MSG))
95 ssd->path_mask = ssd_area->path_mask;
96 ssd->fla_valid_mask = ssd_area->fla_valid_mask;
97 for (i = 0; i < 8; i++) {
99 if (ssd_area->path_mask & mask) {
100 chp_id_init(&ssd->chpid[i]);
101 ssd->chpid[i].id = ssd_area->chpid[i];
103 if (ssd_area->fla_valid_mask & mask)
104 ssd->fla[i] = ssd_area->fla[i];
/*
 * Return non-zero if @sch currently has I/O active (scsw.actl set) on the
 * path selected by @mask (last-path-used mask matches).  Refreshes the
 * schib via stsch() first.  NOTE(review): cc handling and the return
 * statements are missing from this extraction.
 */
111 static int check_for_io_on_path(struct subchannel *sch, int mask)
115 cc = stsch(sch->schid, &sch->schib);
118 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
/*
 * Kill internal (non-device-driver) I/O on @sch with a clear, flag it for
 * retry, and notify the subchannel driver's termination handler.  If the
 * clear itself fails, re-verify the device and fall back to scheduling a
 * subchannel evaluation.
 */
123 static void terminate_internal_io(struct subchannel *sch)
125 if (cio_clear(sch)) {
126 /* Recheck device in case clear failed. */
128 if (device_trigger_verify(sch) != 0)
129 css_schedule_eval(sch->schid);
132 /* Request retry of internal operation. */
133 device_set_intretry(sch);
135 if (sch->driver && sch->driver->termination)
136 sch->driver->termination(sch);
/*
 * bus_for_each_dev() callback used by chsc_chp_offline(): remove a
 * channel path that went offline from one subchannel.  Finds the chpid in
 * the subchannel's pim, refreshes the schib, terminates any I/O running
 * on that path and re-triggers path verification.  A single-path device
 * (pim == 0x80) or an invalid schib leads to a scheduled re-evaluation.
 * NOTE(review): loop-mask setup, several returns/braces and the gotos
 * between the two unlock paths are missing from this extraction.
 */
140 s390_subchannel_remove_chpid(struct device *dev, void *data)
144 struct subchannel *sch;
145 struct chp_id *chpid;
148 sch = to_subchannel(dev);
150 for (j = 0; j < 8; j++) {
152 if ((sch->schib.pmcw.pim & mask) &&
153 (sch->schib.pmcw.chpid[j] == chpid->id))
159 spin_lock_irq(sch->lock);
161 stsch(sch->schid, &schib);
162 if (!css_sch_is_valid(&schib))
164 memcpy(&sch->schib, &schib, sizeof(struct schib));
165 /* Check for single path devices. */
166 if (sch->schib.pmcw.pim == 0x80)
169 if (check_for_io_on_path(sch, mask)) {
170 if (device_is_online(sch))
173 terminate_internal_io(sch);
174 /* Re-start path verification. */
175 if (sch->driver && sch->driver->verify)
176 sch->driver->verify(sch);
179 /* trigger path verification. */
180 if (sch->driver && sch->driver->verify)
181 sch->driver->verify(sch);
182 else if (sch->lpm == mask)
186 spin_unlock_irq(sch->lock);
191 spin_unlock_irq(sch->lock);
192 css_schedule_eval(sch->schid);
/*
 * chsc_chp_offline - handle a channel path going offline
 * @chpid: the channel path that is no longer available
 *
 * Skips paths that are not known/configured (chp_get_status() <= 0), then
 * walks all subchannels on the css bus removing the path from each.
 */
196 void chsc_chp_offline(struct chp_id chpid)
200 sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
201 CIO_TRACE_EVENT(2, dbf_txt);
203 if (chp_get_status(chpid) <= 0)
205 bus_for_each_dev(&css_bus_type, NULL, &chpid,
206 s390_subchannel_remove_chpid);
/*
 * A resource-accessibility event may have made a previously unknown
 * subchannel usable: if stsch_err() shows it exists, queue it on the slow
 * path for full evaluation.  NOTE(review): the error return and the
 * success return are missing from this extraction.
 */
210 s390_process_res_acc_new_sch(struct subchannel_id schid)
214 * We don't know the device yet, but since a path
215 * may be available now to the device we'll have
216 * to do recognition again.
217 * Since we don't have any idea about which chpid
218 * that beast may be on we'll have to do a stsch
219 * on all devices, grr...
221 if (stsch_err(schid, &schib))
225 /* Put it on the slow path. */
226 css_schedule_eval(schid);
/* Parameters of one resource-accessibility event: the reporting chpid plus
 * an optional (partial or full) link address with its significance mask.
 * NOTE(review): the member declarations (chpid, fla, fla_mask) are missing
 * from this extraction. */
230 struct res_acc_data {
/*
 * Build the bit mask of paths in @ssd that match the event described by
 * @data: same chpid and, when a link address was reported, matching fla
 * under data->fla_mask.  NOTE(review): mask setup, the accumulation of the
 * result and the return are missing from this extraction.
 */
236 static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
237 struct res_acc_data *data)
242 for (i = 0; i < 8; i++) {
244 if (!(ssd->path_mask & mask))
246 if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
248 if ((ssd->fla_valid_mask & mask) &&
249 ((ssd->fla[i] & data->fla_mask) != data->fla))
/*
 * for_each_subchannel() worker for s390_process_res_acc(): for a known
 * subchannel, recompute lpm from pim & pam plus the newly accessible
 * paths (chp_mask) restricted to opm; an unknown subchannel is handed to
 * s390_process_res_acc_new_sch().  A device whose lpm went from 0 to
 * non-zero is reprobed, otherwise the driver's verify callback runs.
 * NOTE(review): old_lpm capture, early-out paths and returns are missing
 * from this extraction.
 */
257 __s390_process_res_acc(struct subchannel_id schid, void *data)
259 int chp_mask, old_lpm;
260 struct res_acc_data *res_data;
261 struct subchannel *sch;
264 sch = get_subchannel_by_schid(schid);
266 /* Check if a subchannel is newly available. */
267 return s390_process_res_acc_new_sch(schid);
269 spin_lock_irq(sch->lock);
270 chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
273 if (stsch(sch->schid, &sch->schib))
276 sch->lpm = ((sch->schib.pmcw.pim &
277 sch->schib.pmcw.pam &
279 | chp_mask) & sch->opm;
280 if (!old_lpm && sch->lpm)
281 device_trigger_reprobe(sch);
282 else if (sch->driver && sch->driver->verify)
283 sch->driver->verify(sch);
285 spin_unlock_irq(sch->lock);
/* Drop the reference taken by get_subchannel_by_schid(). */
286 put_device(&sch->dev);
/*
 * Handle one resource-accessibility event: trace it, then walk all
 * subchannels with __s390_process_res_acc().  The fla/fla_mask in
 * @res_data narrow the set of affected paths so less rescanning is
 * needed.
 */
290 static void s390_process_res_acc (struct res_acc_data *res_data)
294 sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
296 CIO_TRACE_EVENT( 2, dbf_txt);
297 if (res_data->fla != 0) {
298 sprintf(dbf_txt, "fla%x", res_data->fla);
299 CIO_TRACE_EVENT( 2, dbf_txt);
303 * I/O resources may have become accessible.
304 * Scan through all subchannels that may be concerned and
305 * do a validation on those.
306 * The more information we have (info), the less scanning
307 * will we have to do.
309 for_each_subchannel(__s390_process_res_acc, res_data);
/*
 * Extract the chpid from a link-incident record (LIR).  Validates the
 * record (non-NULL, node descriptor valid, not a device-type node) and
 * returns the chpid from byte 3 of the first incident-node-descriptor
 * word.  NOTE(review): the struct layout (iq/ic/indesc fields), the
 * error returns and the cast of @data are missing from this extraction.
 */
313 __get_chpid_from_lir(void *data)
319 /* incident-node descriptor */
321 /* attached-node descriptor */
323 /* incident-specific information */
325 } __attribute__ ((packed)) *lir;
329 /* NULL link incident record */
331 if (!(lir->indesc[0]&0xc0000000))
332 /* node descriptor not valid */
334 if (!(lir->indesc[0]&0x10000000))
335 /* don't handle device-type nodes - FIXME */
337 /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
339 return (u16) (lir->indesc[0]&0x000000ff);
/*
 * Request/response block for CHSC store-event-information (command 0x000e),
 * filled into sei_page by chsc_process_crw().  The ccdf field sizing keeps
 * the whole structure within one 4K page.
 * NOTE(review): the flags member tested by chsc_process_sei()/
 * chsc_process_crw() is missing from this extraction.
 */
342 struct chsc_sei_area {
343 struct chsc_header request;
347 struct chsc_header response;
350 u8 vf; /* validity flags */
351 u8 rs; /* reporting source */
352 u8 cc; /* content code */
353 u16 fla; /* full link address */
354 u16 rsid; /* reporting source id */
357 u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
358 /* ccdf has to be big enough for a link-incident record */
359 } __attribute__ ((packed));
/*
 * Handle a link-incident event (content code 1): only reporting source 4
 * is handled; the chpid is parsed out of the ccdf and the path is taken
 * offline.  NOTE(review): chpid construction from the parsed id is
 * missing from this extraction.
 */
361 static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
366 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
367 sei_area->rs, sei_area->rsid);
368 if (sei_area->rs != 4)
370 id = __get_chpid_from_lir(sei_area->ccdf);
372 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
376 chsc_chp_offline(chpid);
/*
 * Handle an I/O-resource-accessibility event (content code 2): build a
 * res_acc_data from the reporting source id and the (possibly partial)
 * link address indicated by the validity flags, then run the common
 * res-acc processing.  vf bits 0xc0 select no / partial (0xff00 mask) /
 * full (0xffff mask) link address.  NOTE(review): chp_id_init and the
 * handling of chp_get_status()/chp_new() are missing from this
 * extraction.
 */
380 static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
382 struct res_acc_data res_data;
386 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
387 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
388 if (sei_area->rs != 4)
391 chpid.id = sei_area->rsid;
392 /* allocate a new channel path structure, if needed */
393 status = chp_get_status(chpid);
398 memset(&res_data, 0, sizeof(struct res_acc_data));
399 res_data.chpid = chpid;
400 if ((sei_area->vf & 0xc0) != 0) {
401 res_data.fla = sei_area->fla;
402 if ((sei_area->vf & 0xc0) == 0xc0)
403 /* full link address */
404 res_data.fla_mask = 0xffff;
407 res_data.fla_mask = 0xff00;
409 s390_process_res_acc(&res_data);
/* Layout of the channel-path-configuration ccdf: a per-chpid bit map plus
 * the requested operation.  NOTE(review): member declarations (map, op)
 * are missing from this extraction. */
412 struct chp_config_data {
/*
 * Handle a channel-path-configuration notification (content code 8):
 * for every chpid set in the map, schedule configure (op 0), deconfigure
 * (op 1) or cancel a pending deconfigure.  NOTE(review): the switch
 * statement around data->op and the chpid setup are missing from this
 * extraction.
 */
418 static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
420 struct chp_config_data *data;
424 CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
425 if (sei_area->rs != 0)
427 data = (struct chp_config_data *) &(sei_area->ccdf);
429 for (num = 0; num <= __MAX_CHPID; num++) {
430 if (!chp_test_bit(data->map, num))
433 printk(KERN_WARNING "cio: processing configure event %d for "
434 "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
437 chp_cfg_schedule(chpid, 1);
440 chp_cfg_schedule(chpid, 0);
443 chp_cfg_cancel_deconfigure(chpid);
/*
 * Dispatch one store-event-information record by content code.  An
 * overflow flag (bit 0x40) means events were lost, so all subchannels are
 * queued for re-evaluation before dispatching.  Unknown content codes are
 * only logged.
 */
449 static void chsc_process_sei(struct chsc_sei_area *sei_area)
451 /* Check if we might have lost some information. */
452 if (sei_area->flags & 0x40) {
453 CIO_CRW_EVENT(2, "chsc: event overflow\n");
454 css_schedule_eval_all();
456 /* which kind of information was stored? */
457 switch (sei_area->cc) {
458 case 1: /* link incident*/
459 chsc_process_sei_link_incident(sei_area);
461 case 2: /* i/o resource accessibiliy */
462 chsc_process_sei_res_acc(sei_area);
464 case 8: /* channel-path-configuration notification */
465 chsc_process_sei_chp_config(sei_area);
467 default: /* other stuff */
468 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
/*
 * chsc_process_crw - drain pending CHSC events after a channel report word
 *
 * Repeatedly issues store-event-information (command 0x000e) into
 * sei_page and processes each returned record until the "more events
 * pending" flag (bit 0x80) is clear.  Runs in the machine-check handler
 * thread, so sei_page needs no locking.  NOTE(review): the sei_page
 * assignment, the chsc() call and the do-loop brace are missing from
 * this extraction.
 */
474 void chsc_process_crw(void)
476 struct chsc_sei_area *sei_area;
480 /* Access to sei_page is serialized through machine check handler
481 * thread, so no need for locking. */
484 CIO_TRACE_EVENT( 2, "prcss");
486 memset(sei_area, 0, sizeof(*sei_area));
487 sei_area->request.length = 0x0010;
488 sei_area->request.code = 0x000e;
492 if (sei_area->response.code == 0x0001) {
493 CIO_CRW_EVENT(4, "chsc: sei successful\n");
494 chsc_process_sei(sei_area);
496 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
497 sei_area->response.code);
500 } while (sei_area->flags & 0x80);
/*
 * A channel path came online and may expose a new subchannel: if
 * stsch_err() shows it exists, queue it on the slow path for evaluation.
 * NOTE(review): the returns are missing from this extraction.
 */
504 __chp_add_new_sch(struct subchannel_id schid)
508 if (stsch_err(schid, &schib))
512 /* Put it on the slow path. */
513 css_schedule_eval(schid);
/*
 * for_each_subchannel() worker for chsc_chp_online(): if the subchannel
 * uses the newly available chpid (pim bit and matching chpid entry),
 * refresh its schib, recompute lpm and trigger driver path verification;
 * unknown subchannels go to __chp_add_new_sch().  NOTE(review): mask
 * setup, the lpm computation tail and several returns/braces are missing
 * from this extraction.
 */
519 __chp_add(struct subchannel_id schid, void *data)
522 struct chp_id *chpid;
523 struct subchannel *sch;
526 sch = get_subchannel_by_schid(schid);
528 /* Check if the subchannel is now available. */
529 return __chp_add_new_sch(schid);
530 spin_lock_irq(sch->lock);
531 for (i=0; i<8; i++) {
533 if ((sch->schib.pmcw.pim & mask) &&
534 (sch->schib.pmcw.chpid[i] == chpid->id)) {
535 if (stsch(sch->schid, &sch->schib) != 0) {
537 spin_unlock_irq(sch->lock);
544 spin_unlock_irq(sch->lock);
547 sch->lpm = ((sch->schib.pmcw.pim &
548 sch->schib.pmcw.pam &
552 if (sch->driver && sch->driver->verify)
553 sch->driver->verify(sch);
555 spin_unlock_irq(sch->lock);
/* Drop the reference taken by get_subchannel_by_schid(). */
556 put_device(&sch->dev);
/*
 * chsc_chp_online - handle a channel path becoming available
 * @chpid: the channel path that came online
 *
 * Skips unconfigured paths (chp_get_status() == 0 would mean nothing to
 * do), then walks all subchannels with __chp_add().
 */
560 void chsc_chp_online(struct chp_id chpid)
564 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
565 CIO_TRACE_EVENT(2, dbf_txt);
567 if (chp_get_status(chpid) != 0)
568 for_each_subchannel(__chp_add, &chpid);
/*
 * Apply a vary on/off of @chpid to one subchannel: locate the matching
 * path in ssd_info, then adjust opm/lpm and either reprobe (path newly
 * usable), terminate I/O running on a varied-off path, or re-run the
 * driver's path verification.  A subchannel left without paths is
 * re-verified or queued for evaluation.  NOTE(review): the opm/lpm
 * updates and several braces are missing from this extraction.
 */
571 static void __s390_subchannel_vary_chpid(struct subchannel *sch,
572 struct chp_id chpid, int on)
578 spin_lock_irqsave(sch->lock, flags);
580 for (chp = 0; chp < 8; chp++) {
582 if (!(sch->ssd_info.path_mask & mask))
584 if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
591 device_trigger_reprobe(sch);
592 else if (sch->driver && sch->driver->verify)
593 sch->driver->verify(sch);
598 if (check_for_io_on_path(sch, mask)) {
599 if (device_is_online(sch))
600 /* Path verification is done after killing. */
603 /* Kill and retry internal I/O. */
604 terminate_internal_io(sch);
605 /* Re-start path verification. */
606 if (sch->driver && sch->driver->verify)
607 sch->driver->verify(sch);
609 } else if (!sch->lpm) {
610 if (device_trigger_verify(sch) != 0)
611 css_schedule_eval(sch->schid);
612 } else if (sch->driver && sch->driver->verify)
613 sch->driver->verify(sch);
616 spin_unlock_irqrestore(sch->lock, flags);
/*
 * bus_for_each_dev() callback: vary @chpid (passed via @data) OFF for one
 * subchannel.  NOTE(review): the chpid assignment from @data and the
 * return are missing from this extraction.
 */
619 static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
621 struct subchannel *sch;
622 struct chp_id *chpid;
624 sch = to_subchannel(dev);
627 __s390_subchannel_vary_chpid(sch, *chpid, 0);
/*
 * bus_for_each_dev() callback: vary @chpid (passed via @data) ON for one
 * subchannel.  Mirrors the _off variant above.
 */
631 static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
633 struct subchannel *sch;
634 struct chp_id *chpid;
636 sch = to_subchannel(dev);
639 __s390_subchannel_vary_chpid(sch, *chpid, 1);
/*
 * for_each_subchannel() worker for the scan after a vary-on: known
 * subchannels just drop the reference (they were handled by the bus
 * walk); an existing but unknown subchannel is queued on the slow path.
 * NOTE(review): the returns are missing from this extraction.
 */
644 __s390_vary_chpid_on(struct subchannel_id schid, void *data)
647 struct subchannel *sch;
649 sch = get_subchannel_by_schid(schid);
651 put_device(&sch->dev);
654 if (stsch_err(schid, &schib))
657 /* Put it on the slow path. */
658 css_schedule_eval(schid);
663 * chsc_chp_vary - propagate channel-path vary operation to subchannels
664 * @chpid: channel-path ID
665 * @on: non-zero for vary online, zero for vary offline
/*
 * Walks all known subchannels to redo path verification on the varied
 * path, then (for vary on) scans for subchannels that became visible.
 * NOTE(review): the return is missing from this extraction.
 */
667 int chsc_chp_vary(struct chp_id chpid, int on)
670 * Redo PathVerification on the devices the chpid connects to
673 bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
674 s390_subchannel_vary_chpid_on :
675 s390_subchannel_vary_chpid_off);
677 /* Scan for new devices on varied on path. */
678 for_each_subchannel(__s390_vary_chpid_on, NULL);
/*
 * Remove channel-measurement sysfs attributes from every channel path of
 * @css.  NOTE(review): the NULL check on css->chps[i] is missing from
 * this extraction.
 */
683 chsc_remove_cmg_attr(struct channel_subsystem *css)
687 for (i = 0; i <= __MAX_CHPID; i++) {
690 chp_remove_cmg_attr(css->chps[i]);
/*
 * Add channel-measurement sysfs attributes to every channel path of
 * @css; on failure, roll back the attributes added so far (the
 * backwards loop).  NOTE(review): NULL checks, the error branch and the
 * return are missing from this extraction.
 */
695 chsc_add_cmg_attr(struct channel_subsystem *css)
700 for (i = 0; i <= __MAX_CHPID; i++) {
703 ret = chp_add_cmg_attr(css->chps[i]);
709 for (--i; i >= 0; i--) {
712 chp_remove_cmg_attr(css->chps[i]);
/*
 * Issue the CHSC set-channel-monitor command (code 0x0016) on @page to
 * enable (operation code 0) or disable (1) channel measurement for @css,
 * passing the two characteristic-update-block addresses.  Maps cc 3 to
 * -ENODEV, other non-zero cc to -EBUSY, and translates the documented
 * response codes to errnos.  NOTE(review): parts of the request layout,
 * the errno assignments per case and the return are missing from this
 * extraction.
 */
718 __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
721 struct chsc_header request;
722 u32 operation_code : 2;
731 struct chsc_header response;
736 } __attribute__ ((packed)) *secm_area;
740 secm_area->request.length = 0x0050;
741 secm_area->request.code = 0x0016;
743 secm_area->key = PAGE_DEFAULT_KEY;
744 secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
745 secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
747 secm_area->operation_code = enable ? 0 : 1;
749 ccode = chsc(secm_area);
751 return (ccode == 3) ? -ENODEV : -EBUSY;
753 switch (secm_area->response.code) {
754 case 0x0001: /* Success. */
757 case 0x0003: /* Invalid block. */
758 case 0x0007: /* Invalid format. */
759 case 0x0008: /* Other invalid block. */
760 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
763 case 0x0004: /* Command not provided in model. */
764 CIO_CRW_EVENT(2, "Model does not provide secm\n");
767 case 0x0102: /* cub adresses incorrect */
768 CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
771 case 0x0103: /* key error */
772 CIO_CRW_EVENT(2, "Access key error in secm\n");
775 case 0x0105: /* error while starting */
776 CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
780 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
781 secm_area->response.code);
/*
 * Enable or disable channel measurement for @css under css->mutex.
 * Enabling first allocates the two cub pages (freed again on failure or
 * when measurement ends up disabled); after a successful __chsc_do_secm()
 * the cmg sysfs attributes are added (with secm rolled back if that
 * fails) or removed.  NOTE(review): allocation-failure return values,
 * some braces and the final return are missing from this extraction.
 */
788 chsc_secm(struct channel_subsystem *css, int enable)
793 secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
797 mutex_lock(&css->mutex);
798 if (enable && !css->cm_enabled) {
799 css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
800 css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
801 if (!css->cub_addr1 || !css->cub_addr2) {
802 free_page((unsigned long)css->cub_addr1);
803 free_page((unsigned long)css->cub_addr2);
804 free_page((unsigned long)secm_area);
805 mutex_unlock(&css->mutex);
809 ret = __chsc_do_secm(css, enable, secm_area);
811 css->cm_enabled = enable;
812 if (css->cm_enabled) {
813 ret = chsc_add_cmg_attr(css);
/* Adding attributes failed: turn measurement back off. */
815 memset(secm_area, 0, PAGE_SIZE);
816 __chsc_do_secm(css, 0, secm_area);
820 chsc_remove_cmg_attr(css);
822 if (!css->cm_enabled) {
823 free_page((unsigned long)css->cub_addr1);
824 free_page((unsigned long)css->cub_addr2);
826 mutex_unlock(&css->mutex);
827 free_page((unsigned long)secm_area);
/*
 * chsc_determine_channel_path_description - read a channel path
 * description via CHSC store-channel-path-description (command 0x0002)
 * @chpid: channel path to describe (first == last chpid in the request)
 * @desc: output buffer for the description
 *
 * Maps cc 3 to -ENODEV, other non-zero cc to -EBUSY, and translates the
 * response code; the page is always freed before returning.
 * NOTE(review): the allocation-failure return, errno assignments and the
 * final return are missing from this extraction.
 */
831 int chsc_determine_channel_path_description(struct chp_id chpid,
832 struct channel_path_desc *desc)
837 struct chsc_header request;
843 struct chsc_header response;
845 struct channel_path_desc desc;
846 } __attribute__ ((packed)) *scpd_area;
848 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
852 scpd_area->request.length = 0x0010;
853 scpd_area->request.code = 0x0002;
855 scpd_area->first_chpid = chpid.id;
856 scpd_area->last_chpid = chpid.id;
858 ccode = chsc(scpd_area);
860 ret = (ccode == 3) ? -ENODEV : -EBUSY;
864 switch (scpd_area->response.code) {
865 case 0x0001: /* Success. */
866 memcpy(desc, &scpd_area->desc,
867 sizeof(struct channel_path_desc));
870 case 0x0003: /* Invalid block. */
871 case 0x0007: /* Invalid format. */
872 case 0x0008: /* Other invalid block. */
873 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
876 case 0x0004: /* Command not provided in model. */
877 CIO_CRW_EVENT(2, "Model does not provide scpd\n");
881 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
882 scpd_area->response.code);
886 free_page((unsigned long)scpd_area);
/*
 * Copy the measurement characteristics selected by the @cmcv validity
 * mask into a freshly allocated chp->cmg_chars; characteristics whose
 * validity bit is clear are zeroed.  NOTE(review): the cmcv test against
 * mask and the no-memory fallback (setting chp->cmg) are missing from
 * this extraction.
 */
891 chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
892 struct cmg_chars *chars)
897 chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
899 if (chp->cmg_chars) {
901 struct cmg_chars *cmg_chars;
903 cmg_chars = chp->cmg_chars;
904 for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
905 mask = 0x80 >> (i + 3);
907 cmg_chars->values[i] = chars->values[i];
909 cmg_chars->values[i] = 0;
914 /* No cmg-dependent data. */
/*
 * chsc_get_channel_measurement_chars - read measurement characteristics
 * for one channel path via CHSC (command 0x0022)
 * @chp: channel path; cmg, shared and cmg_chars are filled in on success
 *
 * Maps cc 3 to -ENODEV, other non-zero cc to -EBUSY; translates the
 * response code; the page is always freed before returning.
 * NOTE(review): the not_valid/cmg fields of the response layout, errno
 * assignments and the final return are missing from this extraction.
 */
919 int chsc_get_channel_measurement_chars(struct channel_path *chp)
924 struct chsc_header request;
930 struct chsc_header response;
941 u32 data[NR_MEASUREMENT_CHARS];
942 } __attribute__ ((packed)) *scmc_area;
944 scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
948 scmc_area->request.length = 0x0010;
949 scmc_area->request.code = 0x0022;
951 scmc_area->first_chpid = chp->chpid.id;
952 scmc_area->last_chpid = chp->chpid.id;
954 ccode = chsc(scmc_area);
956 ret = (ccode == 3) ? -ENODEV : -EBUSY;
960 switch (scmc_area->response.code) {
961 case 0x0001: /* Success. */
962 if (!scmc_area->not_valid) {
963 chp->cmg = scmc_area->cmg;
964 chp->shared = scmc_area->shared;
965 chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
974 case 0x0003: /* Invalid block. */
975 case 0x0007: /* Invalid format. */
976 case 0x0008: /* Invalid bit combination. */
977 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
980 case 0x0004: /* Command not provided. */
981 CIO_CRW_EVENT(2, "Model does not provide scmc\n");
985 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
986 scmc_area->response.code);
990 free_page((unsigned long)scmc_area);
/*
 * Boot-time allocation of the store-event-information page used by
 * chsc_process_crw(); returns -ENOMEM (with a log message) when the page
 * cannot be allocated.
 */
994 int __init chsc_alloc_sei_area(void)
996 sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
998 CIO_MSG_EVENT(0, "Can't allocate page for processing of "
999 "chsc machine checks!\n");
1000 return (sei_page ? 0 : -ENOMEM);
/* Counterpart of chsc_alloc_sei_area(); body (free_page of sei_page) is
 * missing from this extraction. */
1003 void __init chsc_free_sei_area(void)
/*
 * Enable an optional channel-subsystem facility via the CHSC
 * set-domain-attributes command (code 0x0031).  cc 3 maps to -ENODEV,
 * other non-zero cc to -EBUSY; the documented response codes are
 * translated to errnos and the page is freed before returning.
 * NOTE(review): errno assignments per case and the final return are
 * missing from this extraction.
 */
1009 chsc_enable_facility(int operation_code)
1013 struct chsc_header request;
1020 u32 operation_data_area[252];
1021 struct chsc_header response;
1025 } __attribute__ ((packed)) *sda_area;
1027 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1030 sda_area->request.length = 0x0400;
1031 sda_area->request.code = 0x0031;
1032 sda_area->operation_code = operation_code;
1034 ret = chsc(sda_area);
1036 ret = (ret == 3) ? -ENODEV : -EBUSY;
1039 switch (sda_area->response.code) {
1040 case 0x0001: /* everything ok */
1043 case 0x0003: /* invalid request block */
1047 case 0x0004: /* command not provided */
1048 case 0x0101: /* facility not provided */
1051 default: /* something went wrong */
1055 free_page((unsigned long)sda_area);
/* Global copies of the channel-subsystem characteristics, filled by
 * chsc_determine_css_characteristics() and exported below. */
1059 struct css_general_char css_general_characteristics;
1060 struct css_chsc_char css_chsc_characteristics;
/*
 * Query the general and CHSC characteristics of the channel subsystem
 * via store-channel-subsystem-characteristics (command 0x0010) and cache
 * them in the globals above.  Failures are logged; the page is freed
 * before returning.  NOTE(review): the allocation-failure return, the
 * chsc_char field of the response layout, error gotos and the final
 * return are missing from this extraction.
 */
1063 chsc_determine_css_characteristics(void)
1067 struct chsc_header request;
1071 struct chsc_header response;
1073 u32 general_char[510];
1075 } __attribute__ ((packed)) *scsc_area;
1077 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1079 CIO_MSG_EVENT(0, "Was not able to determine available"
1080 "CHSCs due to no memory.\n");
1084 scsc_area->request.length = 0x0010;
1085 scsc_area->request.code = 0x0010;
1087 result = chsc(scsc_area);
1089 CIO_MSG_EVENT(0, "Was not able to determine available CHSCs, "
1090 "cc=%i.\n", result);
1095 if (scsc_area->response.code != 1) {
1096 CIO_MSG_EVENT(0, "Was not able to determine "
1097 "available CHSCs.\n");
1101 memcpy(&css_general_characteristics, scsc_area->general_char,
1102 sizeof(css_general_characteristics));
1103 memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
1104 sizeof(css_chsc_characteristics));
1106 free_page ((unsigned long) scsc_area);
1110 EXPORT_SYMBOL_GPL(css_general_characteristics);
1111 EXPORT_SYMBOL_GPL(css_chsc_characteristics);