/* [S390] cio: Get rid of css_characteristics_avail. */
/* [linux-2.6] / drivers / s390 / cio / css.c */
1 /*
2  *  drivers/s390/cio/css.c
3  *  driver for channel subsystem
4  *
5  *    Copyright IBM Corp. 2002,2008
6  *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
7  *               Cornelia Huck (cornelia.huck@de.ibm.com)
8  */
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/device.h>
12 #include <linux/slab.h>
13 #include <linux/errno.h>
14 #include <linux/list.h>
15 #include <linux/reboot.h>
16
17 #include "../s390mach.h"
18 #include "css.h"
19 #include "cio.h"
20 #include "cio_debug.h"
21 #include "ioasm.h"
22 #include "chsc.h"
23 #include "device.h"
24 #include "idset.h"
25 #include "chp.h"
26
int css_init_done = 0;		/* set once init_channel_subsystem() succeeded */
static int need_reprobe = 0;	/* reprobe request flag, see css_schedule_reprobe() */
static int max_ssid = 0;	/* highest usable subchannel set id (0 or __MAX_SSID) */

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
32
33 int
34 for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
35 {
36         struct subchannel_id schid;
37         int ret;
38
39         init_subchannel_id(&schid);
40         ret = -ENODEV;
41         do {
42                 do {
43                         ret = fn(schid, data);
44                         if (ret)
45                                 break;
46                 } while (schid.sch_no++ < __MAX_SUBCHANNEL);
47                 schid.sch_no = 0;
48         } while (schid.ssid++ < max_ssid);
49         return ret;
50 }
51
/* Context passed through for_each_subchannel_staged(). */
struct cb_data {
	void *data;		/* caller-supplied cookie for both callbacks */
	struct idset *set;	/* ids not yet seen as registered devices */
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};
58
59 static int call_fn_known_sch(struct device *dev, void *data)
60 {
61         struct subchannel *sch = to_subchannel(dev);
62         struct cb_data *cb = data;
63         int rc = 0;
64
65         idset_sch_del(cb->set, sch->schid);
66         if (cb->fn_known_sch)
67                 rc = cb->fn_known_sch(sch, cb->data);
68         return rc;
69 }
70
71 static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
72 {
73         struct cb_data *cb = data;
74         int rc = 0;
75
76         if (idset_sch_contains(cb->set, schid))
77                 rc = cb->fn_unknown_sch(schid, cb->data);
78         return rc;
79 }
80
81 int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
82                                int (*fn_unknown)(struct subchannel_id,
83                                void *), void *data)
84 {
85         struct cb_data cb;
86         int rc;
87
88         cb.set = idset_sch_new();
89         if (!cb.set)
90                 return -ENOMEM;
91         idset_fill(cb.set);
92         cb.data = data;
93         cb.fn_known_sch = fn_known;
94         cb.fn_unknown_sch = fn_unknown;
95         /* Process registered subchannels. */
96         rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
97         if (rc)
98                 goto out;
99         /* Process unregistered subchannels. */
100         if (fn_unknown)
101                 rc = for_each_subchannel(call_fn_unknown_sch, &cb);
102 out:
103         idset_free(cb.set);
104
105         return rc;
106 }
107
108 static struct subchannel *
109 css_alloc_subchannel(struct subchannel_id schid)
110 {
111         struct subchannel *sch;
112         int ret;
113
114         sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
115         if (sch == NULL)
116                 return ERR_PTR(-ENOMEM);
117         ret = cio_validate_subchannel (sch, schid);
118         if (ret < 0) {
119                 kfree(sch);
120                 return ERR_PTR(ret);
121         }
122         return sch;
123 }
124
125 static void
126 css_free_subchannel(struct subchannel *sch)
127 {
128         if (sch) {
129                 /* Reset intparm to zeroes. */
130                 sch->schib.pmcw.intparm = 0;
131                 cio_modify(sch);
132                 kfree(sch->lock);
133                 kfree(sch);
134         }
135 }
136
137 static void
138 css_subchannel_release(struct device *dev)
139 {
140         struct subchannel *sch;
141
142         sch = to_subchannel(dev);
143         if (!cio_is_console(sch->schid)) {
144                 kfree(sch->lock);
145                 kfree(sch);
146         }
147 }
148
149 static int css_sch_device_register(struct subchannel *sch)
150 {
151         int ret;
152
153         mutex_lock(&sch->reg_mutex);
154         ret = device_register(&sch->dev);
155         mutex_unlock(&sch->reg_mutex);
156         return ret;
157 }
158
/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	/* reg_mutex serializes against css_sch_device_register(). */
	mutex_lock(&sch->reg_mutex);
	device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
170
171 static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
172 {
173         int i;
174         int mask;
175
176         memset(ssd, 0, sizeof(struct chsc_ssd_info));
177         ssd->path_mask = pmcw->pim;
178         for (i = 0; i < 8; i++) {
179                 mask = 0x80 >> i;
180                 if (pmcw->pim & mask) {
181                         chp_id_init(&ssd->chpid[i]);
182                         ssd->chpid[i].id = pmcw->chpid[i];
183                 }
184         }
185 }
186
187 static void ssd_register_chpids(struct chsc_ssd_info *ssd)
188 {
189         int i;
190         int mask;
191
192         for (i = 0; i < 8; i++) {
193                 mask = 0x80 >> i;
194                 if (ssd->path_mask & mask)
195                         if (!chp_is_registered(ssd->chpid[i]))
196                                 chp_new(ssd->chpid[i]);
197         }
198 }
199
200 void css_update_ssd_info(struct subchannel *sch)
201 {
202         int ret;
203
204         if (cio_is_console(sch->schid)) {
205                 /* Console is initialized too early for functions requiring
206                  * memory allocation. */
207                 ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
208         } else {
209                 ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
210                 if (ret)
211                         ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
212                 ssd_register_chpids(&sch->ssd_info);
213         }
214 }
215
216 static ssize_t type_show(struct device *dev, struct device_attribute *attr,
217                          char *buf)
218 {
219         struct subchannel *sch = to_subchannel(dev);
220
221         return sprintf(buf, "%01x\n", sch->st);
222 }
223
224 static DEVICE_ATTR(type, 0444, type_show, NULL);
225
226 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
227                              char *buf)
228 {
229         struct subchannel *sch = to_subchannel(dev);
230
231         return sprintf(buf, "css:t%01X\n", sch->st);
232 }
233
234 static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
235
/* Default sysfs attributes attached to every subchannel device. */
static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

/* Installed via sch->dev.groups in css_register_subchannel(). */
static struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};
250
/*
 * Initialize the embedded struct device of @sch and register it with
 * the driver core.  Returns 0 or the error from device registration.
 */
static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	sch->dev.uevent_suppress = 1;
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		sch->dev.uevent_suppress = 0;
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}
289
290 int css_probe_device(struct subchannel_id schid)
291 {
292         int ret;
293         struct subchannel *sch;
294
295         sch = css_alloc_subchannel(schid);
296         if (IS_ERR(sch))
297                 return PTR_ERR(sch);
298         ret = css_register_subchannel(sch);
299         if (ret)
300                 css_free_subchannel(sch);
301         return ret;
302 }
303
304 static int
305 check_subchannel(struct device * dev, void * data)
306 {
307         struct subchannel *sch;
308         struct subchannel_id *schid = data;
309
310         sch = to_subchannel(dev);
311         return schid_equal(&sch->schid, schid);
312 }
313
314 struct subchannel *
315 get_subchannel_by_schid(struct subchannel_id schid)
316 {
317         struct device *dev;
318
319         dev = bus_find_device(&css_bus_type, NULL,
320                               &schid, check_subchannel);
321
322         return dev ? to_subchannel(dev) : NULL;
323 }
324
325 /**
326  * css_sch_is_valid() - check if a subchannel is valid
327  * @schib: subchannel information block for the subchannel
328  */
329 int css_sch_is_valid(struct schib *schib)
330 {
331         if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
332                 return 0;
333         if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
334                 return 0;
335         return 1;
336 }
337 EXPORT_SYMBOL_GPL(css_sch_is_valid);
338
339 static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
340 {
341         struct schib schib;
342
343         if (!slow) {
344                 /* Will be done on the slow path. */
345                 return -EAGAIN;
346         }
347         if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
348                 /* Unusable - ignore. */
349                 return 0;
350         }
351         CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
352                          "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);
353
354         return css_probe_device(schid);
355 }
356
357 static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
358 {
359         int ret = 0;
360
361         if (sch->driver) {
362                 if (sch->driver->sch_event)
363                         ret = sch->driver->sch_event(sch, slow);
364                 else
365                         dev_dbg(&sch->dev,
366                                 "Got subchannel machine check but "
367                                 "no sch_event handler provided.\n");
368         }
369         return ret;
370 }
371
372 static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
373 {
374         struct subchannel *sch;
375         int ret;
376
377         sch = get_subchannel_by_schid(schid);
378         if (sch) {
379                 ret = css_evaluate_known_subchannel(sch, slow);
380                 put_device(&sch->dev);
381         } else
382                 ret = css_evaluate_new_subchannel(schid, slow);
383         if (ret == -EAGAIN)
384                 css_schedule_eval(schid);
385 }
386
/* Ids queued for slow-path evaluation, protected by the lock below. */
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
389
390 static int __init slow_subchannel_init(void)
391 {
392         spin_lock_init(&slow_subchannel_lock);
393         slow_subchannel_set = idset_sch_new();
394         if (!slow_subchannel_set) {
395                 CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
396                 return -ENOMEM;
397         }
398         return 0;
399 }
400
401 static int slow_eval_known_fn(struct subchannel *sch, void *data)
402 {
403         int eval;
404         int rc;
405
406         spin_lock_irq(&slow_subchannel_lock);
407         eval = idset_sch_contains(slow_subchannel_set, sch->schid);
408         idset_sch_del(slow_subchannel_set, sch->schid);
409         spin_unlock_irq(&slow_subchannel_lock);
410         if (eval) {
411                 rc = css_evaluate_known_subchannel(sch, 1);
412                 if (rc == -EAGAIN)
413                         css_schedule_eval(sch->schid);
414         }
415         return 0;
416 }
417
/*
 * Slow-path worker callback for unregistered subchannel ids.  Returns
 * non-zero only for errors that should abort the surrounding scan.
 */
static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	/* Atomically test and clear the eval request for this id. */
	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			/* Not ready yet - queue the id again. */
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}
445
/* Work function: evaluate all subchannels collected in slow_subchannel_set. */
static void css_slow_path_func(struct work_struct *unused)
{
	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
/* Workqueue used for slow-path evaluation and reprobing. */
struct workqueue_struct *slow_path_wq;
455
/* Mark @schid for evaluation and kick the slow-path worker. */
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
465
/* Mark every possible subchannel id for evaluation (e.g. CRW overflow). */
void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
475
/* Wait until pending device-notify and slow-path work has completed. */
void css_wait_for_slow_path(void)
{
	flush_workqueue(ccw_device_notify_work);
	flush_workqueue(slow_path_wq);
}
481
482 /* Reprobe subchannel if unregistered. */
483 static int reprobe_subchannel(struct subchannel_id schid, void *data)
484 {
485         int ret;
486
487         CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
488                       schid.ssid, schid.sch_no);
489         if (need_reprobe)
490                 return -EAGAIN;
491
492         ret = css_probe_device(schid);
493         switch (ret) {
494         case 0:
495                 break;
496         case -ENXIO:
497         case -ENOMEM:
498         case -EIO:
499                 /* These should abort looping */
500                 break;
501         default:
502                 ret = 0;
503         }
504
505         return ret;
506 }
507
/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
	int ret;

	CIO_MSG_EVENT(4, "reprobe start\n");

	/* Clear before scanning: a request arriving during the scan sets it
	 * again and makes reprobe_subchannel() abort with -EAGAIN. */
	need_reprobe = 0;
	/* Make sure initial subchannel scan is done. */
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);

	CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
		      need_reprobe);
}
524
static DECLARE_WORK(css_reprobe_work, reprobe_all);

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Set the flag first; reprobe_all() clears it before scanning. */
	need_reprobe = 1;
	queue_work(slow_path_wq, &css_reprobe_work);
}

EXPORT_SYMBOL_GPL(css_schedule_reprobe);
535
/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;

	if (overflow) {
		/* CRWs were lost - re-evaluate every subchannel. */
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	/* The subchannel number comes from the first CRW, the ssid (bits
	 * 8-9 of rsid) from the chained second CRW, if present. */
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}
568
/*
 * Called for every possible subchannel id during initialization:
 * set up and register the subchannel if it exists.  A non-zero return
 * aborts the scan of the current subchannel set (see for_each_subchannel()).
 */
static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
	struct subchannel *sch;
	int ret;

	if (cio_is_console(schid))
		/* The console subchannel was set up statically. */
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			ret = PTR_ERR(sch);
		else
			ret = 0;
		switch (ret) {
		case 0:
			break;
		case -ENOMEM:
			panic("Out of memory in init_channel_subsystem\n");
		/* -ENXIO: no more subchannels. */
		case -ENXIO:
			return ret;
		/* -EIO: this subchannel set not supported. */
		case -EIO:
			return ret;
		default:
			return 0;
		}
	}
	/*
	 * We register ALL valid subchannels in ioinfo, even those
	 * that have been present before init_channel_subsystem.
	 * These subchannels can't have been registered yet (kmalloc
	 * not working) so we do it now. This is true e.g. for the
	 * console subchannel.
	 */
	css_register_subchannel(sch);
	return 0;
}
608
/* Build the global path group id for @css from machine data. */
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_general_characteristics.mcss) {
		/* Multiple css support: extended cssid format. */
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	/* CPU id and model are taken from the lowcore cpuid field. */
	css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
	css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
	css->global_pgid.tod_high = tod_high;

}
627
628 static void
629 channel_subsystem_release(struct device *dev)
630 {
631         struct channel_subsystem *css;
632
633         css = to_css(dev);
634         mutex_destroy(&css->mutex);
635         kfree(css);
636 }
637
638 static ssize_t
639 css_cm_enable_show(struct device *dev, struct device_attribute *attr,
640                    char *buf)
641 {
642         struct channel_subsystem *css = to_css(dev);
643         int ret;
644
645         if (!css)
646                 return 0;
647         mutex_lock(&css->mutex);
648         ret = sprintf(buf, "%x\n", css->cm_enabled);
649         mutex_unlock(&css->mutex);
650         return ret;
651 }
652
653 static ssize_t
654 css_cm_enable_store(struct device *dev, struct device_attribute *attr,
655                     const char *buf, size_t count)
656 {
657         struct channel_subsystem *css = to_css(dev);
658         int ret;
659         unsigned long val;
660
661         ret = strict_strtoul(buf, 16, &val);
662         if (ret)
663                 return ret;
664         mutex_lock(&css->mutex);
665         switch (val) {
666         case 0:
667                 ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
668                 break;
669         case 1:
670                 ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
671                 break;
672         default:
673                 ret = -EINVAL;
674         }
675         mutex_unlock(&css->mutex);
676         return ret < 0 ? ret : count;
677 }
678
679 static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
680
/* Initialize css number @nr, including its pseudo subchannel ("defunct"). */
static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	sprintf(css->pseudo_subchannel->dev.bus_id, "defunct");
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	sprintf(css->device.bus_id, "css%x", nr);
	css->device.release = channel_subsystem_release;
	/* Upper 32 bits of the TOD clock go into the path group id. */
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}
710
711 static int css_reboot_event(struct notifier_block *this,
712                             unsigned long event,
713                             void *ptr)
714 {
715         int ret, i;
716
717         ret = NOTIFY_DONE;
718         for (i = 0; i <= __MAX_CSSID; i++) {
719                 struct channel_subsystem *css;
720
721                 css = channel_subsystems[i];
722                 mutex_lock(&css->mutex);
723                 if (css->cm_enabled)
724                         if (chsc_secm(css, 0))
725                                 ret = NOTIFY_BAD;
726                 mutex_unlock(&css->mutex);
727         }
728
729         return ret;
730 }
731
732 static struct notifier_block css_reboot_notifier = {
733         .notifier_call = css_reboot_event,
734 };
735
736 /*
737  * Now that the driver core is running, we can setup our channel subsystem.
738  * The struct subchannel's are created during probing (except for the
739  * static console subchannel).
740  */
741 static int __init
742 init_channel_subsystem (void)
743 {
744         int ret, i;
745
746         ret = chsc_determine_css_characteristics();
747         if (ret == -ENOMEM)
748                 goto out; /* No need to continue. */
749
750         ret = chsc_alloc_sei_area();
751         if (ret)
752                 goto out;
753
754         ret = slow_subchannel_init();
755         if (ret)
756                 goto out;
757
758         ret = s390_register_crw_handler(CRW_RSC_SCH, css_process_crw);
759         if (ret)
760                 goto out;
761
762         if ((ret = bus_register(&css_bus_type)))
763                 goto out;
764
765         /* Try to enable MSS. */
766         ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
767         switch (ret) {
768         case 0: /* Success. */
769                 max_ssid = __MAX_SSID;
770                 break;
771         case -ENOMEM:
772                 goto out_bus;
773         default:
774                 max_ssid = 0;
775         }
776         /* Setup css structure. */
777         for (i = 0; i <= __MAX_CSSID; i++) {
778                 struct channel_subsystem *css;
779
780                 css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
781                 if (!css) {
782                         ret = -ENOMEM;
783                         goto out_unregister;
784                 }
785                 channel_subsystems[i] = css;
786                 ret = setup_css(i);
787                 if (ret)
788                         goto out_free;
789                 ret = device_register(&css->device);
790                 if (ret)
791                         goto out_free_all;
792                 if (css_chsc_characteristics.secm) {
793                         ret = device_create_file(&css->device,
794                                                  &dev_attr_cm_enable);
795                         if (ret)
796                                 goto out_device;
797                 }
798                 ret = device_register(&css->pseudo_subchannel->dev);
799                 if (ret)
800                         goto out_file;
801         }
802         ret = register_reboot_notifier(&css_reboot_notifier);
803         if (ret)
804                 goto out_pseudo;
805         css_init_done = 1;
806
807         ctl_set_bit(6, 28);
808
809         for_each_subchannel(__init_channel_subsystem, NULL);
810         return 0;
811 out_pseudo:
812         device_unregister(&channel_subsystems[i]->pseudo_subchannel->dev);
813 out_file:
814         device_remove_file(&channel_subsystems[i]->device,
815                            &dev_attr_cm_enable);
816 out_device:
817         device_unregister(&channel_subsystems[i]->device);
818 out_free_all:
819         kfree(channel_subsystems[i]->pseudo_subchannel->lock);
820         kfree(channel_subsystems[i]->pseudo_subchannel);
821 out_free:
822         kfree(channel_subsystems[i]);
823 out_unregister:
824         while (i > 0) {
825                 struct channel_subsystem *css;
826
827                 i--;
828                 css = channel_subsystems[i];
829                 device_unregister(&css->pseudo_subchannel->dev);
830                 if (css_chsc_characteristics.secm)
831                         device_remove_file(&css->device,
832                                            &dev_attr_cm_enable);
833                 device_unregister(&css->device);
834         }
835 out_bus:
836         bus_unregister(&css_bus_type);
837 out:
838         s390_unregister_crw_handler(CRW_RSC_CSS);
839         chsc_free_sei_area();
840         kfree(slow_subchannel_set);
841         printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n",
842                ret);
843         return ret;
844 }
845
846 int sch_is_pseudo_sch(struct subchannel *sch)
847 {
848         return sch == to_css(sch->dev.parent)->pseudo_subchannel;
849 }
850
851 /*
852  * find a driver for a subchannel. They identify by the subchannel
853  * type with the exception that the console subchannel driver has its own
854  * subchannel type although the device is an i/o subchannel
855  */
856 static int
857 css_bus_match (struct device *dev, struct device_driver *drv)
858 {
859         struct subchannel *sch = to_subchannel(dev);
860         struct css_driver *driver = to_cssdriver(drv);
861
862         if (sch->st == driver->subchannel_type)
863                 return 1;
864
865         return 0;
866 }
867
868 static int css_probe(struct device *dev)
869 {
870         struct subchannel *sch;
871         int ret;
872
873         sch = to_subchannel(dev);
874         sch->driver = to_cssdriver(dev->driver);
875         ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
876         if (ret)
877                 sch->driver = NULL;
878         return ret;
879 }
880
881 static int css_remove(struct device *dev)
882 {
883         struct subchannel *sch;
884         int ret;
885
886         sch = to_subchannel(dev);
887         ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
888         sch->driver = NULL;
889         return ret;
890 }
891
892 static void css_shutdown(struct device *dev)
893 {
894         struct subchannel *sch;
895
896         sch = to_subchannel(dev);
897         if (sch->driver && sch->driver->shutdown)
898                 sch->driver->shutdown(sch);
899 }
900
901 static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
902 {
903         struct subchannel *sch = to_subchannel(dev);
904         int ret;
905
906         ret = add_uevent_var(env, "ST=%01X", sch->st);
907         if (ret)
908                 return ret;
909         ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
910         return ret;
911 }
912
/* The css bus: subchannel devices matched to css drivers by type. */
struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
};
921
/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.name = cdrv->name;
	cdrv->drv.bus = &css_bus_type;
	cdrv->drv.owner = cdrv->owner;
	/* Matching against the subchannel type happens in css_bus_match(). */
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
937
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);
949
/* Bring the channel subsystem up early, before device drivers probe. */
subsys_initcall(init_channel_subsystem);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);