/*
 *  drivers/s390/cio/css.c
 *  driver for channel subsystem
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                       IBM Corporation
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"

int need_rescan = 0;
int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;

struct channel_subsystem *css[__MAX_CSSID + 1];

int css_characteristics_avail = 0;

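/*
 * Call fn for every possible subchannel id: all subchannel numbers in all
 * subchannel sets up to max_ssid. A non-zero return value from fn ends the
 * walk of the current subchannel set; the value returned by the last call
 * to fn is handed back to the caller.
 *
 * Callbacks use the same signature as css_rescan_devices() below; an
 * illustrative sketch (not part of the driver) that simply counts ids:
 *
 *	static int count_subchannels(struct subchannel_id schid, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	(returning 0 continues the walk)
 *	}
 */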
inline int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
        struct subchannel_id schid;
        int ret;

        init_subchannel_id(&schid);
        ret = -ENODEV;
        do {
                do {
                        ret = fn(schid, data);
                        if (ret)
                                break;
                } while (schid.sch_no++ < __MAX_SUBCHANNEL);
                schid.sch_no = 0;
        } while (schid.ssid++ < max_ssid);
        return ret;
}

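/*
 * Allocate a subchannel structure in DMA-capable memory and validate it
 * via cio_validate_subchannel(). Only I/O subchannels are accepted; the
 * interruption parameter (intparm) is set to the subchannel's address.
 */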
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
        struct subchannel *sch;
        int ret;

        sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
        if (sch == NULL)
                return ERR_PTR(-ENOMEM);
        ret = cio_validate_subchannel (sch, schid);
        if (ret < 0) {
                kfree(sch);
                return ERR_PTR(ret);
        }

        if (sch->st != SUBCHANNEL_TYPE_IO) {
                /* For now we ignore all non-io subchannels. */
                kfree(sch);
                return ERR_PTR(-EINVAL);
        }

        /*
         * Set intparm to subchannel address.
         * This is fine even on 64bit since the subchannel is always located
         * under 2G.
         */
        sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
        ret = cio_modify(sch);
        if (ret) {
                kfree(sch);
                return ERR_PTR(ret);
        }
        return sch;
}

static void
css_free_subchannel(struct subchannel *sch)
{
        if (sch) {
                /* Reset intparm to zeroes. */
                sch->schib.pmcw.intparm = 0;
                cio_modify(sch);
                kfree(sch->lock);
                kfree(sch);
        }
}

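/*
 * Device release callback for subchannels. The statically allocated
 * console subchannel must not be freed here.
 */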
static void
css_subchannel_release(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (!cio_is_console(sch->schid)) {
                kfree(sch->lock);
                kfree(sch);
        }
}

extern int css_get_ssd_info(struct subchannel *sch);


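/*
 * Driver core registration and unregistration of a subchannel are
 * serialized via the subchannel's reg_mutex.
 */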
int css_sch_device_register(struct subchannel *sch)
{
        int ret;

        mutex_lock(&sch->reg_mutex);
        ret = device_register(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
        return ret;
}

void css_sch_device_unregister(struct subchannel *sch)
{
        mutex_lock(&sch->reg_mutex);
        device_unregister(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
}

static int
css_register_subchannel(struct subchannel *sch)
{
        int ret;

        /* Initialize the subchannel structure */
        sch->dev.parent = &css[0]->device;
        sch->dev.bus = &css_bus_type;
        sch->dev.release = &css_subchannel_release;
        sch->dev.groups = subch_attr_groups;

        css_get_ssd_info(sch);

        /* make it known to the system */
        ret = css_sch_device_register(sch);
        if (ret) {
                printk (KERN_WARNING "%s: could not register %s\n",
                        __func__, sch->dev.bus_id);
                return ret;
        }
        return ret;
}

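/*
 * Allocate and register a new subchannel for the given id. On registration
 * failure the freshly allocated structure is released again.
 */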
int
css_probe_device(struct subchannel_id schid)
{
        int ret;
        struct subchannel *sch;

        sch = css_alloc_subchannel(schid);
        if (IS_ERR(sch))
                return PTR_ERR(sch);
        ret = css_register_subchannel(sch);
        if (ret)
                css_free_subchannel(sch);
        return ret;
}

static int
check_subchannel(struct device * dev, void * data)
{
        struct subchannel *sch;
        struct subchannel_id *schid = data;

        sch = to_subchannel(dev);
        return schid_equal(&sch->schid, schid);
}

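/*
 * Look up an already registered subchannel by its id. Returns the
 * subchannel with its device reference count incremented (the caller is
 * expected to drop it with put_device()), or NULL if none is registered.
 */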
struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
        struct device *dev;

        dev = bus_find_device(&css_bus_type, NULL,
                              &schid, check_subchannel);

        return dev ? to_subchannel(dev) : NULL;
}

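/*
 * Classify the current state of a known subchannel by comparing a fresh
 * store-subchannel result against the cached schib: CIO_GONE if the
 * subchannel or its device has vanished, CIO_REVALIDATE if the device
 * number changed, CIO_NO_PATH if no path is left, CIO_OPER otherwise.
 */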
static inline int css_get_subchannel_status(struct subchannel *sch)
{
        struct schib schib;

        if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
                return CIO_GONE;
        if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
                return CIO_REVALIDATE;
        if (!sch->lpm)
                return CIO_NO_PATH;
        return CIO_OPER;
}

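/*
 * Evaluate a subchannel that is already registered. Disconnected devices
 * are handled on the fast path only, all others on the slow path; depending
 * on the subchannel status the device is unregistered, reprobed or left
 * alone. Called without the subchannel lock held.
 */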
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
        int event, ret, disc;
        unsigned long flags;
        enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;

        spin_lock_irqsave(sch->lock, flags);
        disc = device_is_disconnected(sch);
        if (disc && slow) {
                /* Disconnected devices are evaluated directly only. */
                spin_unlock_irqrestore(sch->lock, flags);
                return 0;
        }
        /* No interrupt after machine check - kill pending timers. */
        device_kill_pending_timer(sch);
        if (!disc && !slow) {
                /* Non-disconnected devices are evaluated on the slow path. */
                spin_unlock_irqrestore(sch->lock, flags);
                return -EAGAIN;
        }
        event = css_get_subchannel_status(sch);
        CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
                      sch->schid.ssid, sch->schid.sch_no, event,
                      disc ? "disconnected" : "normal",
                      slow ? "slow" : "fast");
        /* Analyze subchannel status. */
        action = NONE;
        switch (event) {
        case CIO_NO_PATH:
                if (disc) {
                        /* Check if paths have become available. */
                        action = REPROBE;
                        break;
                }
                /* fall through */
        case CIO_GONE:
                /* Prevent unwanted effects when opening lock. */
                cio_disable_subchannel(sch);
                device_set_disconnected(sch);
                /* Ask driver what to do with device. */
                action = UNREGISTER;
                if (sch->driver && sch->driver->notify) {
                        spin_unlock_irqrestore(sch->lock, flags);
                        ret = sch->driver->notify(&sch->dev, event);
                        spin_lock_irqsave(sch->lock, flags);
                        if (ret)
                                action = NONE;
                }
                break;
        case CIO_REVALIDATE:
                /* Device will be removed, so no notify necessary. */
                if (disc)
                        /* Reprobe because immediate unregister might block. */
                        action = REPROBE;
                else
                        action = UNREGISTER_PROBE;
                break;
        case CIO_OPER:
                if (disc)
                        /* Get device operational again. */
                        action = REPROBE;
                break;
        }
        /* Perform action. */
        ret = 0;
        switch (action) {
        case UNREGISTER:
        case UNREGISTER_PROBE:
                /* Unregister device (will use subchannel lock). */
                spin_unlock_irqrestore(sch->lock, flags);
                css_sch_device_unregister(sch);
                spin_lock_irqsave(sch->lock, flags);

                /* Reset intparm to zeroes. */
                sch->schib.pmcw.intparm = 0;
                cio_modify(sch);
                break;
        case REPROBE:
                device_trigger_reprobe(sch);
                break;
        default:
                break;
        }
        spin_unlock_irqrestore(sch->lock, flags);
        /* Probe if necessary. */
        if (action == UNREGISTER_PROBE)
                ret = css_probe_device(sch->schid);

        return ret;
}

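/*
 * Evaluate a subchannel id that has no registered subchannel yet.
 * Operational subchannels are probed on the slow path; everything else is
 * deferred or ignored.
 */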
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
        struct schib schib;

        if (!slow) {
                /* Will be done on the slow path. */
                return -EAGAIN;
        }
        if (stsch(schid, &schib) || !schib.pmcw.dnv) {
                /* Unusable - ignore. */
                return 0;
        }
        CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
                         "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

        return css_probe_device(schid);
}

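/*
 * Evaluate a subchannel id, dispatching to the known or new subchannel
 * variant depending on whether a subchannel is already registered for it.
 */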
static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
        struct subchannel *sch;
        int ret;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                ret = css_evaluate_known_subchannel(sch, slow);
                put_device(&sch->dev);
        } else
                ret = css_evaluate_new_subchannel(schid, slow);

        return ret;
}

static int
css_rescan_devices(struct subchannel_id schid, void *data)
{
        return css_evaluate_subchannel(schid, 1);
}

struct slow_subchannel {
        struct list_head slow_list;
        struct subchannel_id schid;
};

static LIST_HEAD(slow_subchannels_head);
static DEFINE_SPINLOCK(slow_subchannel_lock);

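/*
 * Slow path work function: either rescan all subchannels (if a full rescan
 * was requested via need_rescan) or drain the list of queued subchannel
 * ids, evaluating each entry with slow_subchannel_lock dropped around the
 * evaluation.
 */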
static void
css_trigger_slow_path(struct work_struct *unused)
{
        CIO_TRACE_EVENT(4, "slowpath");

        if (need_rescan) {
                need_rescan = 0;
                for_each_subchannel(css_rescan_devices, NULL);
                return;
        }

        spin_lock_irq(&slow_subchannel_lock);
        while (!list_empty(&slow_subchannels_head)) {
                struct slow_subchannel *slow_sch =
                        list_entry(slow_subchannels_head.next,
                                   struct slow_subchannel, slow_list);

                list_del_init(slow_subchannels_head.next);
                spin_unlock_irq(&slow_subchannel_lock);
                css_evaluate_subchannel(slow_sch->schid, 1);
                spin_lock_irq(&slow_subchannel_lock);
                kfree(slow_sch);
        }
        spin_unlock_irq(&slow_subchannel_lock);
}

DECLARE_WORK(slow_path_work, css_trigger_slow_path);
struct workqueue_struct *slow_path_wq;

/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
        struct subchannel *sch;
        int ret;

        CIO_DEBUG(KERN_INFO, 6, "cio: reprobe 0.%x.%04x\n",
                  schid.ssid, schid.sch_no);
        if (need_reprobe)
                return -EAGAIN;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                /* Already known. */
                put_device(&sch->dev);
                return 0;
        }

        ret = css_probe_device(schid);
        switch (ret) {
        case 0:
                break;
        case -ENXIO:
        case -ENOMEM:
                /* These should abort looping */
                break;
        default:
                ret = 0;
        }

        return ret;
}

/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
        int ret;

        CIO_MSG_EVENT(2, "reprobe start\n");

        need_reprobe = 0;
        /* Make sure initial subchannel scan is done. */
        wait_event(ccw_device_init_wq,
                   atomic_read(&ccw_device_init_count) == 0);
        ret = for_each_subchannel(reprobe_subchannel, NULL);

        CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
                      need_reprobe);
}

DECLARE_WORK(css_reprobe_work, reprobe_all);

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
        need_reprobe = 1;
        queue_work(ccw_device_work, &css_reprobe_work);
}

EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Rescan for new devices. FIXME: This is slow.
 * This function is called when we have lost CRWs due to overflows and we have
 * to do subchannel housekeeping.
 */
void
css_reiterate_subchannels(void)
{
        css_clear_subchannel_slow_list();
        need_rescan = 1;
}

/*
 * Called from the machine check handler for subchannel report words.
 */
int
css_process_crw(int rsid1, int rsid2)
{
        int ret;
        struct subchannel_id mchk_schid;

        CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
                      rsid1, rsid2);

        if (need_rescan)
                /* We need to iterate all subchannels anyway. */
                return -EAGAIN;

        init_subchannel_id(&mchk_schid);
        mchk_schid.sch_no = rsid1;
        if (rsid2 != 0)
                mchk_schid.ssid = (rsid2 >> 8) & 3;

        /*
         * Since we are always presented with IPI in the CRW, we have to
         * use stsch() to find out if the subchannel in question has come
         * or gone.
         */
        ret = css_evaluate_subchannel(mchk_schid, 0);
        if (ret == -EAGAIN) {
                if (css_enqueue_subchannel_slow(mchk_schid)) {
                        css_clear_subchannel_slow_list();
                        need_rescan = 1;
                }
        }
        return ret;
}

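/*
 * Callback for the initial subchannel scan: register every valid
 * subchannel, including those (like the console subchannel) that were set
 * up before kmalloc was available. Returning -ENXIO or -EIO ends the scan
 * of the current subchannel set; running out of memory is fatal here.
 */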
static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
        struct subchannel *sch;
        int ret;

        if (cio_is_console(schid))
                sch = cio_get_console_subchannel();
        else {
                sch = css_alloc_subchannel(schid);
                if (IS_ERR(sch))
                        ret = PTR_ERR(sch);
                else
                        ret = 0;
                switch (ret) {
                case 0:
                        break;
                case -ENOMEM:
                        panic("Out of memory in init_channel_subsystem\n");
                /* -ENXIO: no more subchannels. */
                case -ENXIO:
                        return ret;
                /* -EIO: this subchannel set not supported. */
                case -EIO:
                        return ret;
                default:
                        return 0;
                }
        }
        /*
         * We register ALL valid subchannels in ioinfo, even those
         * that have been present before init_channel_subsystem.
         * These subchannels can't have been registered yet (kmalloc
         * not working) so we do it now. This is true e.g. for the
         * console subchannel.
         */
        css_register_subchannel(sch);
        return 0;
}

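/*
 * Build the global path group id for a channel subsystem. With multiple
 * channel subsystem support the extended cssid is encoded in the high word,
 * otherwise the CPU address is used. CPU id, CPU model and the high word of
 * the TOD clock complete the id.
 */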
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
        if (css_characteristics_avail && css_general_characteristics.mcss) {
                css->global_pgid.pgid_high.ext_cssid.version = 0x80;
                css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
        } else {
#ifdef CONFIG_SMP
                css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
#else
                css->global_pgid.pgid_high.cpu_addr = 0;
#endif
        }
        css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
        css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
        css->global_pgid.tod_high = tod_high;

}

static void
channel_subsystem_release(struct device *dev)
{
        struct channel_subsystem *css;

        css = to_css(dev);
        mutex_destroy(&css->mutex);
        kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
                   char *buf)
{
        struct channel_subsystem *css = to_css(dev);

        if (!css)
                return 0;
        return sprintf(buf, "%x\n", css->cm_enabled);
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
                    const char *buf, size_t count)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;

        switch (buf[0]) {
        case '0':
                ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
                break;
        case '1':
                ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
                break;
        default:
                ret = -EINVAL;
        }
        return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

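/*
 * Initialize one channel_subsystem structure: allocate its "defunct" pseudo
 * subchannel, create the subchannel lock, and set up cssid, bus id and the
 * global path group id.
 */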
static inline int __init setup_css(int nr)
{
        u32 tod_high;
        int ret;

        memset(css[nr], 0, sizeof(struct channel_subsystem));
        css[nr]->pseudo_subchannel =
                kzalloc(sizeof(*css[nr]->pseudo_subchannel), GFP_KERNEL);
        if (!css[nr]->pseudo_subchannel)
                return -ENOMEM;
        css[nr]->pseudo_subchannel->dev.parent = &css[nr]->device;
        css[nr]->pseudo_subchannel->dev.release = css_subchannel_release;
        sprintf(css[nr]->pseudo_subchannel->dev.bus_id, "defunct");
        ret = cio_create_sch_lock(css[nr]->pseudo_subchannel);
        if (ret) {
                kfree(css[nr]->pseudo_subchannel);
                return ret;
        }
        mutex_init(&css[nr]->mutex);
        css[nr]->valid = 1;
        css[nr]->cssid = nr;
        sprintf(css[nr]->device.bus_id, "css%x", nr);
        css[nr]->device.release = channel_subsystem_release;
        tod_high = (u32) (get_clock() >> 32);
        css_generate_pgid(css[nr], tod_high);
        return 0;
}

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init
init_channel_subsystem (void)
{
        int ret, i;

        if (chsc_determine_css_characteristics() == 0)
                css_characteristics_avail = 1;

        if ((ret = bus_register(&css_bus_type)))
                goto out;

        /* Try to enable MSS. */
        ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
        switch (ret) {
        case 0: /* Success. */
                max_ssid = __MAX_SSID;
                break;
        case -ENOMEM:
                goto out_bus;
        default:
                max_ssid = 0;
        }
        /* Setup css structure. */
        for (i = 0; i <= __MAX_CSSID; i++) {
                css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
                if (!css[i]) {
                        ret = -ENOMEM;
                        goto out_unregister;
                }
                ret = setup_css(i);
                if (ret)
                        goto out_free;
                ret = device_register(&css[i]->device);
                if (ret)
                        goto out_free_all;
                if (css_characteristics_avail &&
                    css_chsc_characteristics.secm) {
                        ret = device_create_file(&css[i]->device,
                                                 &dev_attr_cm_enable);
                        if (ret)
                                goto out_device;
                }
                ret = device_register(&css[i]->pseudo_subchannel->dev);
                if (ret)
                        goto out_file;
        }
        css_init_done = 1;

        ctl_set_bit(6, 28);

        for_each_subchannel(__init_channel_subsystem, NULL);
        return 0;
out_file:
        device_remove_file(&css[i]->device, &dev_attr_cm_enable);
out_device:
        device_unregister(&css[i]->device);
out_free_all:
        kfree(css[i]->pseudo_subchannel->lock);
        kfree(css[i]->pseudo_subchannel);
out_free:
        kfree(css[i]);
out_unregister:
        while (i > 0) {
                i--;
                device_unregister(&css[i]->pseudo_subchannel->dev);
                if (css_characteristics_avail && css_chsc_characteristics.secm)
                        device_remove_file(&css[i]->device,
                                           &dev_attr_cm_enable);
                device_unregister(&css[i]->device);
        }
out_bus:
        bus_unregister(&css_bus_type);
out:
        return ret;
}

int sch_is_pseudo_sch(struct subchannel *sch)
{
        return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

/*
 * Find a driver for a subchannel. Drivers are matched by subchannel type,
 * with the exception that the console subchannel driver uses its own
 * subchannel type even though the device is an I/O subchannel.
 */
static int
css_bus_match (struct device *dev, struct device_driver *drv)
{
        struct subchannel *sch = container_of (dev, struct subchannel, dev);
        struct css_driver *driver = container_of (drv, struct css_driver, drv);

        if (sch->st == driver->subchannel_type)
                return 1;

        return 0;
}

static int
css_probe (struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        sch->driver = container_of (dev->driver, struct css_driver, drv);
        return (sch->driver->probe ? sch->driver->probe(sch) : 0);
}

static int
css_remove (struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        return (sch->driver->remove ? sch->driver->remove(sch) : 0);
}

static void
css_shutdown (struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (sch->driver->shutdown)
                sch->driver->shutdown(sch);
}

struct bus_type css_bus_type = {
        .name     = "css",
        .match    = css_bus_match,
        .probe    = css_probe,
        .remove   = css_remove,
        .shutdown = css_shutdown,
};

subsys_initcall(init_channel_subsystem);

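/*
 * Queue a subchannel id for later evaluation on the slow path. Uses
 * GFP_ATOMIC and irqsave locking so it can be called from non-sleeping
 * context; returns -ENOMEM if the list entry cannot be allocated.
 */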
int
css_enqueue_subchannel_slow(struct subchannel_id schid)
{
        struct slow_subchannel *new_slow_sch;
        unsigned long flags;

        new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
        if (!new_slow_sch)
                return -ENOMEM;
        new_slow_sch->schid = schid;
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
        return 0;
}

void
css_clear_subchannel_slow_list(void)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        while (!list_empty(&slow_subchannels_head)) {
                struct slow_subchannel *slow_sch =
                        list_entry(slow_subchannels_head.next,
                                   struct slow_subchannel, slow_list);

                list_del_init(slow_subchannels_head.next);
                kfree(slow_sch);
        }
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

int
css_slow_subchannels_exist(void)
{
        return (!list_empty(&slow_subchannels_head));
}

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);
EXPORT_SYMBOL_GPL(css_characteristics_avail);