/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cohuck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include "cio_debug.h"
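
/*
 * Scratch page used as the control block for store-event-information
 * chsc calls; allocated once at boot (chsc_alloc_sei_area) and only
 * accessed from the machine check handler thread, so no locking is
 * needed for it.
 */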
static void *sei_page;

static int new_channel_path(int chpid);

set_chp_logically_online(int chp, int onoff)
        css[0]->chps[chp]->state = onoff;

get_chp_status(int chp)
        return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);

chsc_validate_chpids(struct subchannel *sch)
        for (chp = 0; chp <= 7; chp++) {
                if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
                        /* disable using this path */

chpid_is_actually_online(int chp)
        state = get_chp_status(chp);

                queue_work(slow_path_wq, &slow_path_work);

/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *        process more than one at a time? */
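/*
 * Issue a store-subchannel-description chsc for the given subchannel and
 * copy the returned chpid and full link address information into
 * sch->ssd_info. The caller holds the subchannel lock and supplies the
 * scratch page for the chsc area.
 */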
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
                struct chsc_header request;
                u16 f_sch;        /* first subchannel */
                u16 l_sch;        /* last subchannel */
                struct chsc_header response;
                u8 st : 3;        /* subchannel type */
                u8 unit_addr;     /* unit address */
                u16 devno;        /* device number */
                u16 sch;          /* subchannel */
                u8 chpid[8];      /* chpids 0-7 */
                u16 fla[8];       /* full link addresses 0-7 */

        ssd_area->request = (struct chsc_header) {

        ssd_area->f_sch = sch->schid.sch_no;
        ssd_area->l_sch = sch->schid.sch_no;

        ccode = chsc(ssd_area);
                pr_debug("chsc returned with ccode = %d\n", ccode);
                return (ccode == 3) ? -ENODEV : -EBUSY;

        switch (ssd_area->response.code) {
        case 0x0001: /* everything ok */
                CIO_CRW_EVENT(2, "Invalid command!\n");
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                CIO_CRW_EVENT(2, "Model does not provide ssd\n");
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              ssd_area->response.code);

        /*
         * ssd_area->st stores the type of the detected
         * subchannel, with the following definitions:
         *
         * 0: I/O subchannel:     All fields have meaning
         * 1: CHSC subchannel:    Only sch_val, st and sch
         * 2: Message subchannel: All fields except unit_addr
         * 3: ADM subchannel:     Only sch_val, st and sch
         *
         * Other types are currently undefined.
         */
        if (ssd_area->st > 3) { /* uhm, that looks strange... */
                CIO_CRW_EVENT(0, "Strange subchannel type %d"
                              " for sch %04x\n", ssd_area->st,
                /*
                 * There may have been a new subchannel type defined in the
                 * time since this code was written; since we don't know which
                 * fields have meaning and what to do with it we just jump out
                 */

                const char *type[4] = {"I/O", "chsc", "message", "ADM"};
                CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n",
                              sch->schid.sch_no, type[ssd_area->st]);

        sch->ssd_info.valid = 1;
        sch->ssd_info.type = ssd_area->st;

        if (ssd_area->st == 0 || ssd_area->st == 2) {
                for (j = 0; j < 8; j++) {
                        if (!((0x80 >> j) & ssd_area->path_mask &
                              ssd_area->fla_valid_mask))
                        sch->ssd_info.chpid[j] = ssd_area->chpid[j];
                        sch->ssd_info.fla[j] = ssd_area->fla[j];

css_get_ssd_info(struct subchannel *sch)
        page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);

        spin_lock_irq(&sch->lock);
        ret = chsc_get_sch_desc_irq(sch, page);
                static int cio_chsc_err_msg;

                if (!cio_chsc_err_msg) {
                               "chsc_get_sch_descriptions:"
                               " Error %d while doing chsc; "
                               "processing some machine checks may "
                        cio_chsc_err_msg = 1;

        spin_unlock_irq(&sch->lock);
        free_page((unsigned long)page);

                /* Allocate channel path structures, if needed. */
                for (j = 0; j < 8; j++) {
                        chpid = sch->ssd_info.chpid[j];
                        if (chpid && (get_chp_status(chpid) < 0))
                                new_channel_path(chpid);
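
/*
 * bus_for_each_dev callback, called for every subchannel when a channel
 * path has gone away: refresh the subchannel's schib, terminate I/O that
 * was running over the lost path and trigger path verification, or put
 * the subchannel on the slow path for reevaluation if it can no longer
 * be used.
 */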
s390_subchannel_remove_chpid(struct device *dev, void *data)
        struct subchannel *sch;
        struct channel_path *chpid;

        sch = to_subchannel(dev);
        for (j = 0; j < 8; j++)
                if (sch->schib.pmcw.chpid[j] == chpid->id)

        spin_lock(&sch->lock);

        stsch(sch->schid, &schib);
        memcpy(&sch->schib, &schib, sizeof(struct schib));
        /* Check for single path devices. */
        if (sch->schib.pmcw.pim == 0x80)
        if (sch->vpm == mask)

        if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND |
                                     SCSW_ACTL_HALT_PEND |
                                     SCSW_ACTL_START_PEND |
                                     SCSW_ACTL_RESUME_PEND)) &&
            (sch->schib.pmcw.lpum == mask)) {
                int cc = cio_cancel(sch);

                if (sch->driver && sch->driver->termination)
                        sch->driver->termination(&sch->dev);
        } else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
                   (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
                   (sch->schib.pmcw.lpum == mask)) {

                if (sch->driver && sch->driver->termination)
                        sch->driver->termination(&sch->dev);

        /* trigger path verification. */
        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock(&sch->lock);

        spin_unlock(&sch->lock);
        if (css_enqueue_subchannel_slow(sch->schid)) {
                css_clear_subchannel_slow_list();

s390_set_chpid_offline(__u8 chpid)
        sprintf(dbf_txt, "chpr%x", chpid);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (get_chp_status(chpid) <= 0)

        dev = get_device(&css[0]->chps[chpid]->dev);
        bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
                         s390_subchannel_remove_chpid);

        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
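
/*
 * Data passed to the I/O resource accessibility handlers: the channel
 * path that has become available, plus an optional (full) link address
 * and a mask describing how much of that address is valid.
 */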
struct res_acc_data {
        struct channel_path *chp;

s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
        for (chp = 0; chp <= 7; chp++)
                /*
                 * check if chpid is in information updated by ssd
                 */
                if (sch->ssd_info.valid &&
                    sch->ssd_info.chpid[chp] == res_data->chp->id &&
                    (sch->ssd_info.fla[chp] & res_data->fla_mask)

        /*
         * Do a stsch to update our subchannel structure with the
         * new path information and eventually check for logically
         * offline chpids.
         */
        ccode = stsch(sch->schid, &sch->schib);

s390_process_res_acc_new_sch(struct subchannel_id schid)
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
         * to do recognition again.
         * Since we don't have any idea about which chpid
         * that beast may be on we'll have to do a stsch
         * on all devices, grr...
         */
        if (stsch(schid, &schib))
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
                css_clear_subchannel_slow_list();

__s390_process_res_acc(struct subchannel_id schid, void *data)
        int chp_mask, old_lpm;
        struct res_acc_data *res_data;
        struct subchannel *sch;

        res_data = (struct res_acc_data *)data;
        sch = get_subchannel_by_schid(schid);
                /* Check if a subchannel is newly available. */
                return s390_process_res_acc_new_sch(schid);

        spin_lock_irq(&sch->lock);

        chp_mask = s390_process_res_acc_sch(res_data, sch);
                spin_unlock_irq(&sch->lock);

        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                    | chp_mask) & sch->opm;
        if (!old_lpm && sch->lpm)
                device_trigger_reprobe(sch);
        else if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(&sch->lock);
        put_device(&sch->dev);
        return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;

s390_process_res_acc(struct res_acc_data *res_data)
        sprintf(dbf_txt, "accpr%x", res_data->chp->id);
        CIO_TRACE_EVENT(2, dbf_txt);
        if (res_data->fla != 0) {
                sprintf(dbf_txt, "fla%x", res_data->fla);
                CIO_TRACE_EVENT(2, dbf_txt);

        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have (info), the less scanning
         * we will have to do.
         */
        rc = for_each_subchannel(__s390_process_res_acc, res_data);
        if (css_slow_subchannels_exist())
        else if (rc != -EAGAIN)
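
/*
 * Extract the chpid from a link-incident record, or return a negative
 * value if the record does not describe a valid channel-path node.
 */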
__get_chpid_from_lir(void *data)
                /* incident-node descriptor */
                /* attached-node descriptor */
                /* incident-specific information */

        lir = (struct lir*) data;
                /* NULL link incident record */
        if (!(lir->indesc[0]&0xc0000000))
                /* node descriptor not valid */
        if (!(lir->indesc[0]&0x10000000))
                /* don't handle device-type nodes - FIXME */
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

        return (u16) (lir->indesc[0]&0x000000ff);
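
/*
 * Process channel report words with channel subsystem source: repeatedly
 * issue the store-event-information chsc on sei_page and handle the
 * reported events (link incidents, I/O resource accessibility) until the
 * response no longer flags further pending event information.
 */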
chsc_process_crw(void)
        struct res_acc_data res_data;
                struct chsc_header request;
                struct chsc_header response;
                u8 vf;          /* validity flags */
                u8 rs;          /* reporting source */
                u8 cc;          /* content code */
                u16 fla;        /* full link address */
                u16 rsid;       /* reporting source id */
                u32 ccdf[96];   /* content-code dependent field */
                /* ccdf has to be big enough for a link-incident record */

        /*
         * build the chsc request block for store event information
         * This function is only called by the machine check handler thread,
         * so we don't need locking for the sei_page.
         */
        CIO_TRACE_EVENT(2, "prcss");

                memset(sei_area, 0, sizeof(*sei_area));
                memset(&res_data, 0, sizeof(struct res_acc_data));
                sei_area->request = (struct chsc_header) {

                ccode = chsc(sei_area);

                switch (sei_area->response.code) {
                        /* for debug purposes, check for problems */
                        CIO_CRW_EVENT(4, "chsc_process_crw: event information "
                                      "successfully stored\n");
                        break; /* everything ok */
                                      "chsc_process_crw: invalid command!\n");
                        CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
                        CIO_CRW_EVENT(2, "chsc_process_crw: no event "
                                      "information stored\n");
                        CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
                                      sei_area->response.code);

                /* Check if we might have lost some information. */
                if (sei_area->flags & 0x40)
                        CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
                                      "has been lost due to overflow!\n");

                if (sei_area->rs != 4) {
                        CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
                                      "(%04X) isn't a chpid!\n",

                /* which kind of information was stored? */
                switch (sei_area->cc) {
                case 1: /* link incident */
                        CIO_CRW_EVENT(4, "chsc_process_crw: "
                                      "channel subsystem reports link incident,"
                                      " reporting source is chpid %x\n",
                        chpid = __get_chpid_from_lir(sei_area->ccdf);
                                CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
                                s390_set_chpid_offline(chpid);

                case 2: /* I/O resource accessibility */
                        CIO_CRW_EVENT(4, "chsc_process_crw: "
                                      "channel subsystem reports some I/O "
                                      "devices may have become accessible\n");
                        pr_debug("Data received after sei:\n");
                        pr_debug("Validity flags: %x\n", sei_area->vf);

                        /* allocate a new channel path structure, if needed */
                        status = get_chp_status(sei_area->rsid);
                                new_channel_path(sei_area->rsid);

                        dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
                        res_data.chp = to_channelpath(dev);
                        pr_debug("chpid: %x", sei_area->rsid);
                        if ((sei_area->vf & 0xc0) != 0) {
                                res_data.fla = sei_area->fla;
                                if ((sei_area->vf & 0xc0) == 0xc0) {
                                        pr_debug(" full link addr: %x",
                                        res_data.fla_mask = 0xffff;
                                        pr_debug(" link addr: %x",
                                        res_data.fla_mask = 0xff00;

                        ret = s390_process_res_acc(&res_data);

                default: /* other stuff */
                        CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
        } while (sei_area->flags & 0x80);
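
/*
 * A subchannel we did not know about may have become available on the
 * newly added channel path; schedule it for evaluation via the slow path.
 */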
__chp_add_new_sch(struct subchannel_id schid)
        if (stsch(schid, &schib))
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
                css_clear_subchannel_slow_list();

__chp_add(struct subchannel_id schid, void *data)
        struct channel_path *chp;
        struct subchannel *sch;

        chp = (struct channel_path *)data;
        sch = get_subchannel_by_schid(schid);
                /* Check if the subchannel is now available. */
                return __chp_add_new_sch(schid);

        spin_lock(&sch->lock);
                if (sch->schib.pmcw.chpid[i] == chp->id) {
                        if (stsch(sch->schid, &sch->schib) != 0) {
                                spin_unlock(&sch->lock);

                spin_unlock(&sch->lock);

        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                    | 0x80 >> i) & sch->opm;

        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock(&sch->lock);
        put_device(&sch->dev);

        if (!get_chp_status(chpid))
                return 0; /* no need to do the rest */

        sprintf(dbf_txt, "cadd%x", chpid);
        CIO_TRACE_EVENT(2, dbf_txt);

        dev = get_device(&css[0]->chps[chpid]->dev);
        rc = for_each_subchannel(__chp_add, to_channelpath(dev));
        if (css_slow_subchannels_exist())

/*
 * Handling of crw machine checks with channel path source.
 */
chp_process_crw(int chpid, int on)
                /* Path has gone. We use the link incident routine. */
                s390_set_chpid_offline(chpid);
                return 0; /* De-register is async anyway. */

        /*
         * Path has come. Allocate a new channel path structure,
         * if needed.
         */
        if (get_chp_status(chpid) < 0)
                new_channel_path(chpid);
        /* Avoid the extra overhead in process_res_acc. */
        return chp_add(chpid);
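
/*
 * Returns 1 if the subchannel still has I/O active on the path that is
 * being varied off; the device is then put into the waiting state so the
 * I/O gets a chance to terminate (see the grace period comment below).
 */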
__check_for_io_and_kill(struct subchannel *sch, int index)
        if (!device_is_online(sch))
                /* cio could be doing I/O. */
        cc = stsch(sch->schid, &sch->schib);
        if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
                device_set_waiting(sch);

__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
        if (!sch->ssd_info.valid)

        spin_lock_irqsave(&sch->lock, flags);
        for (chp = 0; chp < 8; chp++) {
                if (sch->ssd_info.chpid[chp] != chpid)

                        sch->opm |= (0x80 >> chp);
                        sch->lpm |= (0x80 >> chp);
                                device_trigger_reprobe(sch);
                        else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);

                        sch->opm &= ~(0x80 >> chp);
                        sch->lpm &= ~(0x80 >> chp);
                        /*
                         * Give running I/O a grace period in which it
                         * can successfully terminate, even using the
                         * just varied off path. Then kill it.
                         */
                        if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
                                if (css_enqueue_subchannel_slow(sch->schid)) {
                                        css_clear_subchannel_slow_list();
                        } else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);

        spin_unlock_irqrestore(&sch->lock, flags);

s390_subchannel_vary_chpid_off(struct device *dev, void *data)
        struct subchannel *sch;

        sch = to_subchannel(dev);

        __s390_subchannel_vary_chpid(sch, *chpid, 0);

s390_subchannel_vary_chpid_on(struct device *dev, void *data)
        struct subchannel *sch;

        sch = to_subchannel(dev);

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
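
/*
 * for_each_subchannel callback used after a path has been varied on:
 * schedule subchannels that were previously unknown, but may be
 * reachable now, for evaluation via the slow path.
 */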
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
        struct subchannel *sch;

        sch = get_subchannel_by_schid(schid);
                put_device(&sch->dev);

        if (stsch(schid, &schib))

        /* Put it on the slow path. */
        if (css_enqueue_subchannel_slow(schid)) {
                css_clear_subchannel_slow_list();

/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
s390_vary_chpid(__u8 chpid, int on)
        sprintf(dbf_text, on ? "varyon%x" : "varyoff%x", chpid);
        CIO_TRACE_EVENT(2, dbf_text);

        status = get_chp_status(chpid);
                printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);

        if (!on && !status) {
                printk(KERN_ERR "chpid %x is already offline\n", chpid);

        set_chp_logically_online(chpid, on);

        /*
         * Redo PathVerification on the devices the chpid connects to
         */
        bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
                         s390_subchannel_vary_chpid_on :
                         s390_subchannel_vary_chpid_off);

                /* Scan for new devices on the varied-on path. */
                for_each_subchannel(__s390_vary_chpid_on, NULL);
                if (need_rescan || css_slow_subchannels_exist())
                        queue_work(slow_path_wq, &slow_path_work);
/*
 * Files for the channel path entries.
 */
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
        struct channel_path *chp = container_of(dev, struct channel_path, dev);

        return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
                sprintf(buf, "offline\n"));

chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
        struct channel_path *cp = container_of(dev, struct channel_path, dev);

        num_args = sscanf(buf, "%5s", cmd);

        if (!strnicmp(cmd, "on", 2))
                error = s390_vary_chpid(cp->id, 1);
        else if (!strnicmp(cmd, "off", 3))
                error = s390_vary_chpid(cp->id, 0);

        return error < 0 ? error : count;

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);

chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
        struct channel_path *chp = container_of(dev, struct channel_path, dev);

        return sprintf(buf, "%x\n", chp->desc.desc);

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static struct attribute *chp_attrs[] = {
        &dev_attr_status.attr,

static struct attribute_group chp_attr_group = {

chp_release(struct device *dev)
        struct channel_path *cp;

        cp = container_of(dev, struct channel_path, dev);

chsc_determine_channel_path_description(int chpid,
                                        struct channel_path_desc *desc)
                struct chsc_header request;
                struct chsc_header response;
                struct channel_path_desc desc;

        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);

        scpd_area->request = (struct chsc_header) {

        scpd_area->first_chpid = chpid;
        scpd_area->last_chpid = chpid;

        ccode = chsc(scpd_area);
                ret = (ccode == 3) ? -ENODEV : -EBUSY;

        switch (scpd_area->response.code) {
        case 0x0001: /* Success. */
                memcpy(desc, &scpd_area->desc,
                       sizeof(struct channel_path_desc));
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
        case 0x0004: /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide scpd\n");
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scpd_area->response.code);

        free_page((unsigned long)scpd_area);

/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
new_channel_path(int chpid)
        struct channel_path *chp;

        chp = kmalloc(sizeof(struct channel_path), GFP_KERNEL);
        memset(chp, 0, sizeof(struct channel_path));

        /* fill in status, etc. */
        chp->dev = (struct device) {
                .parent  = &css[0]->device,
                .release = chp_release,
        snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);

        /* Obtain channel path description and fill it in. */
        ret = chsc_determine_channel_path_description(chpid, &chp->desc);

        /* make it known to the system */
        ret = device_register(&chp->dev);
                printk(KERN_WARNING "%s: could not register %02x\n",

        ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
                device_unregister(&chp->dev);

        css[0]->chps[chpid] = chp;
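
/*
 * Return a freshly allocated copy of the channel path descriptor for the
 * given path of the subchannel, or NULL if it is not known or no memory
 * is available.
 */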
chsc_get_chp_desc(struct subchannel *sch, int chp_no)
        struct channel_path *chp;
        struct channel_path_desc *desc;

        chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
        desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
        memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));

chsc_alloc_sei_area(void)
        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                printk(KERN_WARNING "Can't allocate page for processing of "
                       "chsc machine checks!\n");
        return (sei_page ? 0 : -ENOMEM);

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
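
/*
 * Issue the store channel subsystem characteristics chsc and cache the
 * result in css_general_characteristics and css_chsc_characteristics.
 */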
chsc_determine_css_characteristics(void)
                struct chsc_header request;
                struct chsc_header response;
                u32 general_char[510];

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs due to no memory.\n");

        scsc_area->request = (struct chsc_header) {

        result = chsc(scsc_area);
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs, cc=%i.\n", result);

        if (scsc_area->response.code != 1) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs.\n");

        memcpy(&css_general_characteristics, scsc_area->general_char,
               sizeof(css_general_characteristics));
        memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
               sizeof(css_chsc_characteristics));

        free_page((unsigned long) scsc_area);

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);