2 * drivers/s390/cio/css.c
3 * driver for channel subsystem
5 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
8 * Cornelia Huck (cornelia.huck@de.ibm.com)
10 #include <linux/module.h>
11 #include <linux/init.h>
12 #include <linux/device.h>
13 #include <linux/slab.h>
14 #include <linux/errno.h>
15 #include <linux/list.h>
16 #include <linux/reboot.h>
20 #include "cio_debug.h"
/* NOTE(review): this file is a lossy extraction — the embedded per-line
 * numbers show gaps, so code between visible lines is missing throughout. */
/* Set non-zero once channel-subsystem initialization has completed. */
27 int css_init_done = 0;
/* Flag read by the reprobe worker (see reprobe_all); presumably set when a
 * reprobe is requested — setter not visible here, TODO confirm. */
28 static int need_reprobe = 0;
/* Highest valid subchannel-set id; raised to __MAX_SSID when the MSS
 * facility is successfully enabled in init_channel_subsystem(). */
29 static int max_ssid = 0;
/* One channel_subsystem instance per possible css id, filled in setup. */
31 struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
/* Non-zero once chsc_determine_css_characteristics() succeeded. */
33 int css_characteristics_avail = 0;
/* Iterate over every possible subchannel id — all subchannel numbers up to
 * __MAX_SUBCHANNEL within every subchannel set up to max_ssid — calling
 * fn(schid, data) for each.  Presumably stops early when fn returns
 * non-zero; the early-exit check sits in an extraction gap — TODO confirm. */
36 for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
38 struct subchannel_id schid;
41 init_subchannel_id(&schid);
45 ret = fn(schid, data);
48 } while (schid.sch_no++ < __MAX_SUBCHANNEL);
50 } while (schid.ssid++ < max_ssid);
/* Allocate and validate a struct subchannel for @schid.
 * Returns the new subchannel or an ERR_PTR on failure.  GFP_DMA is used
 * because the subchannel (holding the intparm target) must be 31-bit
 * addressable for the channel subsystem.  Non-I/O subchannel types are
 * rejected with -EINVAL.  Error cleanup paths fall in extraction gaps. */
54 static struct subchannel *
55 css_alloc_subchannel(struct subchannel_id schid)
57 struct subchannel *sch;
60 sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
62 return ERR_PTR(-ENOMEM);
63 ret = cio_validate_subchannel (sch, schid);
69 if (sch->st != SUBCHANNEL_TYPE_IO) {
70 /* For now we ignore all non-io subchannels. */
72 return ERR_PTR(-EINVAL);
76 * Set intparm to subchannel address.
77 * This is fine even on 64bit since the subchannel is always located
80 sch->schib.pmcw.intparm = (u32)(addr_t)sch;
/* Write the updated PMCW (including intparm) back to the hardware. */
81 ret = cio_modify(sch);
/* Release a subchannel that was never registered: clear intparm so the
 * hardware no longer points at the structure.  The cio_modify()/kfree()
 * that presumably follow are in an extraction gap — TODO confirm. */
91 css_free_subchannel(struct subchannel *sch)
94 /* Reset intparm to zeroes. */
95 sch->schib.pmcw.intparm = 0;
/* Device-core release callback for subchannel devices.  The console
 * subchannel is statically allocated, so only non-console subchannels are
 * freed (the actual free inside the if-body is in an extraction gap). */
103 css_subchannel_release(struct device *dev)
105 struct subchannel *sch;
107 sch = to_subchannel(dev);
108 if (!cio_is_console(sch->schid)) {
/* Register the subchannel with the driver core, serialized against
 * css_sch_device_unregister() via the per-subchannel reg_mutex. */
114 static int css_sch_device_register(struct subchannel *sch)
118 mutex_lock(&sch->reg_mutex);
119 ret = device_register(&sch->dev);
120 mutex_unlock(&sch->reg_mutex);
124 void css_sch_device_unregister(struct subchannel *sch)
126 mutex_lock(&sch->reg_mutex);
127 device_unregister(&sch->dev);
128 mutex_unlock(&sch->reg_mutex);
/* Build a chsc_ssd_info from the PMCW as a fallback when CHSC data is not
 * available (e.g. for the early console subchannel): take the path mask
 * from pim and initialize one chpid entry per installed path.  The mask
 * computation for each bit sits in an extraction gap. */
131 static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
136 memset(ssd, 0, sizeof(struct chsc_ssd_info));
137 ssd->path_mask = pmcw->pim;
138 for (i = 0; i < 8; i++) {
140 if (pmcw->pim & mask) {
141 chp_id_init(&ssd->chpid[i]);
142 ssd->chpid[i].id = pmcw->chpid[i];
/* Create channel-path objects for every path in @ssd that is not yet
 * registered with the chp layer. */
147 static void ssd_register_chpids(struct chsc_ssd_info *ssd)
152 for (i = 0; i < 8; i++) {
154 if (ssd->path_mask & mask)
155 if (!chp_is_registered(ssd->chpid[i]))
156 chp_new(ssd->chpid[i]);
/* Refresh sch->ssd_info.  The console subchannel is set up before kmalloc
 * works, so it derives the info directly from the PMCW; everyone else asks
 * CHSC and (presumably on failure — the ret check is in a gap) falls back
 * to the PMCW.  Finally make sure all referenced chpids are registered. */
160 void css_update_ssd_info(struct subchannel *sch)
164 if (cio_is_console(sch->schid)) {
165 /* Console is initialized too early for functions requiring
166 * memory allocation. */
167 ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
169 ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
171 ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
172 ssd_register_chpids(&sch->ssd_info);
/* Fill in the driver-core fields of @sch (parent css, bus, release
 * callback, attribute groups), suppress the add uevent for non-console
 * I/O subchannels until device recognition succeeds, update the ssd info
 * and register the device, logging on failure. */
176 static int css_register_subchannel(struct subchannel *sch)
180 /* Initialize the subchannel structure */
181 sch->dev.parent = &channel_subsystems[0]->device;
182 sch->dev.bus = &css_bus_type;
183 sch->dev.release = &css_subchannel_release;
184 sch->dev.groups = subch_attr_groups;
186 * We don't want to generate uevents for I/O subchannels that don't
187 * have a working ccw device behind them since they will be
188 * unregistered before they can be used anyway, so we delay the add
189 * uevent until after device recognition was successful.
191 if (!cio_is_console(sch->schid))
192 /* Console is special, no need to suppress. */
193 sch->dev.uevent_suppress = 1;
194 css_update_ssd_info(sch);
195 /* make it known to the system */
196 ret = css_sch_device_register(sch);
198 CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
199 sch->schid.ssid, sch->schid.sch_no, ret);
/* Allocate a subchannel for @schid and register it; on registration
 * failure the subchannel is freed again.  The IS_ERR check between
 * allocation and registration is in an extraction gap. */
205 static int css_probe_device(struct subchannel_id schid)
208 struct subchannel *sch;
210 sch = css_alloc_subchannel(schid);
213 ret = css_register_subchannel(sch);
215 css_free_subchannel(sch);
/* bus_find_device() match callback: true when the device's subchannel id
 * equals the id passed in @data. */
220 check_subchannel(struct device * dev, void * data)
222 struct subchannel *sch;
223 struct subchannel_id *schid = data;
225 sch = to_subchannel(dev);
226 return schid_equal(&sch->schid, schid);
/* Look up the registered subchannel with id @schid, or NULL if none.
 * A successful lookup holds a device reference; callers drop it with
 * put_device() (see css_evaluate_subchannel / reprobe_subchannel). */
230 get_subchannel_by_schid(struct subchannel_id schid)
234 dev = bus_find_device(&css_bus_type, NULL,
235 &schid, check_subchannel);
237 return dev ? to_subchannel(dev) : NULL;
241 * css_sch_is_valid() - check if a subchannel is valid
242 * @schib: subchannel information block for the subchannel
244 int css_sch_is_valid(struct schib *schib)
246 if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
250 EXPORT_SYMBOL_GPL(css_sch_is_valid);
/* Re-fetch the schib for @sch and classify the subchannel state.  Visible
 * here: a device-number change yields CIO_REVALIDATE; the return values for
 * stsch failure, invalid schib and the operational case sit in extraction
 * gaps (presumably CIO_GONE / CIO_OPER — TODO confirm). */
252 static int css_get_subchannel_status(struct subchannel *sch)
256 if (stsch(sch->schid, &schib))
258 if (!css_sch_is_valid(&schib))
260 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
261 return CIO_REVALIDATE;
/* Re-evaluate an already-registered subchannel after a machine check.
 * Under the subchannel lock: disconnected devices are only handled on the
 * fast (direct) path and everything else only on the slow path; the status
 * is classified, the driver's notify() callback may be consulted (with the
 * lock temporarily dropped), and an action (NONE/UNREGISTER/
 * UNREGISTER_PROBE/REPROBE) is chosen and performed.  UNREGISTER_PROBE
 * additionally re-probes the id after unregistering.
 * NOTE(review): this function is heavily gutted by extraction gaps — the
 * switch statements and several early-return branches are missing, so the
 * per-event decision logic cannot be fully read from what is visible. */
267 static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
269 int event, ret, disc;
271 enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
273 spin_lock_irqsave(sch->lock, flags);
274 disc = device_is_disconnected(sch);
276 /* Disconnected devices are evaluated directly only.*/
277 spin_unlock_irqrestore(sch->lock, flags);
280 /* No interrupt after machine check - kill pending timers. */
281 device_kill_pending_timer(sch);
282 if (!disc && !slow) {
283 /* Non-disconnected devices are evaluated on the slow path. */
284 spin_unlock_irqrestore(sch->lock, flags);
287 event = css_get_subchannel_status(sch);
288 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
289 sch->schid.ssid, sch->schid.sch_no, event,
290 disc ? "disconnected" : "normal",
291 slow ? "slow" : "fast");
292 /* Analyze subchannel status. */
297 /* Check if paths have become available. */
303 /* Prevent unwanted effects when opening lock. */
304 cio_disable_subchannel(sch);
305 device_set_disconnected(sch);
306 /* Ask driver what to do with device. */
308 if (sch->driver && sch->driver->notify) {
309 spin_unlock_irqrestore(sch->lock, flags);
310 ret = sch->driver->notify(sch, event);
311 spin_lock_irqsave(sch->lock, flags);
317 /* Device will be removed, so no notify necessary. */
319 /* Reprobe because immediate unregister might block. */
322 action = UNREGISTER_PROBE;
326 /* Get device operational again. */
330 /* Perform action. */
334 case UNREGISTER_PROBE:
335 /* Unregister device (will use subchannel lock). */
336 spin_unlock_irqrestore(sch->lock, flags);
337 css_sch_device_unregister(sch);
338 spin_lock_irqsave(sch->lock, flags);
340 /* Reset intparm to zeroes. */
341 sch->schib.pmcw.intparm = 0;
345 device_trigger_reprobe(sch);
350 spin_unlock_irqrestore(sch->lock, flags);
351 /* Probe if necessary. */
352 if (action == UNREGISTER_PROBE)
353 ret = css_probe_device(sch->schid);
/* Evaluate a subchannel id that has no registered device yet.  New
 * subchannels are only handled on the slow path (the fast-path bail-out is
 * in a gap); unreadable or invalid subchannels are ignored, otherwise the
 * device is probed. */
358 static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
363 /* Will be done on the slow path. */
366 if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
367 /* Unusable - ignore. */
370 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
371 "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);
373 return css_probe_device(schid);
/* Dispatch evaluation of @schid: known (registered) subchannels go through
 * css_evaluate_known_subchannel() — dropping the lookup reference after —
 * and unknown ones through css_evaluate_new_subchannel().  The trailing
 * css_schedule_eval() presumably re-queues the id when evaluation asked to
 * be retried (its condition is in a gap — TODO confirm). */
376 static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
378 struct subchannel *sch;
381 sch = get_subchannel_by_schid(schid);
383 ret = css_evaluate_known_subchannel(sch, slow);
384 put_device(&sch->dev);
386 ret = css_evaluate_new_subchannel(schid, slow);
388 css_schedule_eval(schid);
/* Set of subchannel ids awaiting slow-path evaluation, and the lock that
 * protects it (taken by the scheduling functions and the worker). */
391 static struct idset *slow_subchannel_set;
392 static spinlock_t slow_subchannel_lock;
/* Initialize the slow-path machinery: the lock and the idset holding
 * pending subchannel ids.  The error/success returns are in gaps
 * (presumably -ENOMEM / 0 — TODO confirm). */
394 static int __init slow_subchannel_init(void)
396 spin_lock_init(&slow_subchannel_lock);
397 slow_subchannel_set = idset_sch_new();
398 if (!slow_subchannel_set) {
399 CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
/* Slow-path work function: drain the pending idset, evaluating each id on
 * the slow path.  The lock is dropped around each evaluation because
 * css_evaluate_subchannel() may sleep and may itself re-add ids. */
405 static void css_slow_path_func(struct work_struct *unused)
407 struct subchannel_id schid;
409 CIO_TRACE_EVENT(4, "slowpath");
410 spin_lock_irq(&slow_subchannel_lock);
411 init_subchannel_id(&schid);
412 while (idset_sch_get_first(slow_subchannel_set, &schid)) {
413 idset_sch_del(slow_subchannel_set, schid);
414 spin_unlock_irq(&slow_subchannel_lock);
415 css_evaluate_subchannel(schid, 1);
416 spin_lock_irq(&slow_subchannel_lock);
418 spin_unlock_irq(&slow_subchannel_lock);
/* Work item running css_slow_path_func, and the dedicated workqueue it
 * (and the reprobe work) is queued on. */
421 static DECLARE_WORK(slow_path_work, css_slow_path_func);
422 struct workqueue_struct *slow_path_wq;
424 void css_schedule_eval(struct subchannel_id schid)
428 spin_lock_irqsave(&slow_subchannel_lock, flags);
429 idset_sch_add(slow_subchannel_set, schid);
430 queue_work(slow_path_wq, &slow_path_work);
431 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
434 void css_schedule_eval_all(void)
438 spin_lock_irqsave(&slow_subchannel_lock, flags);
439 idset_fill(slow_subchannel_set);
440 queue_work(slow_path_wq, &slow_path_work);
441 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
444 /* Reprobe subchannel if unregistered. */
/* for_each_subchannel() callback: if @schid already has a registered
 * subchannel, just drop the lookup reference; otherwise probe it.  The
 * early-abort check on need_reprobe and the return-value mapping are in
 * extraction gaps. */
445 static int reprobe_subchannel(struct subchannel_id schid, void *data)
447 struct subchannel *sch;
450 CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
451 schid.ssid, schid.sch_no);
455 sch = get_subchannel_by_schid(schid);
458 put_device(&sch->dev);
462 ret = css_probe_device(schid);
469 /* These should abort looping */
478 /* Work function used to reprobe all unregistered subchannels. */
/* Waits for the initial subchannel scan to finish (ccw_device_init_count
 * drains to zero), then walks all subchannel ids with
 * reprobe_subchannel() and logs the outcome together with need_reprobe. */
479 static void reprobe_all(struct work_struct *unused)
483 CIO_MSG_EVENT(2, "reprobe start\n");
486 /* Make sure initial subchannel scan is done. */
487 wait_event(ccw_device_init_wq,
488 atomic_read(&ccw_device_init_count) == 0);
489 ret = for_each_subchannel(reprobe_subchannel, NULL);
491 CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
/* Work item that runs reprobe_all; queued by css_schedule_reprobe(). */
495 static DECLARE_WORK(css_reprobe_work, reprobe_all);
497 /* Schedule reprobing of all unregistered subchannels. */
/* Queues css_reprobe_work on the slow-path workqueue; presumably also sets
 * need_reprobe in the gap before the queue_work call — TODO confirm. */
498 void css_schedule_reprobe(void)
501 queue_work(slow_path_wq, &css_reprobe_work);
504 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
507 * Called from the machine check handler for subchannel report words.
/* Build a subchannel id from the CRW report source ids — rsid1 is the
 * subchannel number, bits 8-9 of rsid2 select the subchannel set — and
 * evaluate it on the fast path. */
509 void css_process_crw(int rsid1, int rsid2)
511 struct subchannel_id mchk_schid;
513 CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
515 init_subchannel_id(&mchk_schid);
516 mchk_schid.sch_no = rsid1;
518 mchk_schid.ssid = (rsid2 >> 8) & 3;
521 * Since we are always presented with IPI in the CRW, we have to
522 * use stsch() to find out if the subchannel in question has come
525 css_evaluate_subchannel(mchk_schid, 0);
/* for_each_subchannel() callback used during boot: allocate (or, for the
 * console, fetch the static) subchannel for @schid and register it.
 * Error handling for the alloc result — panic on -ENOMEM, stop on -ENXIO
 * (no more subchannels) / -EIO (set unsupported) — is partly in gaps. */
529 __init_channel_subsystem(struct subchannel_id schid, void *data)
531 struct subchannel *sch;
534 if (cio_is_console(schid))
535 sch = cio_get_console_subchannel();
537 sch = css_alloc_subchannel(schid);
546 panic("Out of memory in init_channel_subsystem\n");
547 /* -ENXIO: no more subchannels. */
550 /* -EIO: this subchannel set not supported. */
558 * We register ALL valid subchannels in ioinfo, even those
559 * that have been present before init_channel_subsystem.
560 * These subchannels can't have been registered yet (kmalloc
561 * not working) so we do it now. This is true e.g. for the
562 * console subchannel.
564 css_register_subchannel(sch);
/* Build the global path-group id for @css: with multiple-css support the
 * high word carries a versioned extended cssid, otherwise the CPU address
 * (the branch condition between L253 and L254 is in a gap); CPU id/model
 * come from the lowcore, plus the caller-supplied TOD high word. */
569 css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
571 if (css_characteristics_avail && css_general_characteristics.mcss) {
572 css->global_pgid.pgid_high.ext_cssid.version = 0x80;
573 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
576 css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
578 css->global_pgid.pgid_high.cpu_addr = 0;
581 css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
582 css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
583 css->global_pgid.tod_high = tod_high;
/* Device-core release callback for a channel_subsystem: destroy its mutex
 * (the to_css() conversion and kfree are in extraction gaps). */
588 channel_subsystem_release(struct device *dev)
590 struct channel_subsystem *css;
593 mutex_destroy(&css->mutex);
/* sysfs 'cm_enable' show: print whether channel measurement is enabled. */
598 css_cm_enable_show(struct device *dev, struct device_attribute *attr,
601 struct channel_subsystem *css = to_css(dev);
605 return sprintf(buf, "%x\n", css->cm_enabled);
/* sysfs 'cm_enable' store: parse the user value (parsing is in a gap) and
 * enable/disable channel measurement via chsc_secm(), skipping the call
 * when the state already matches.  Returns count on success. */
609 css_cm_enable_store(struct device *dev, struct device_attribute *attr,
610 const char *buf, size_t count)
612 struct channel_subsystem *css = to_css(dev);
617 ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
620 ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
625 return ret < 0 ? ret : count;
628 static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
/* Initialize channel_subsystems[nr]: zero the structure, allocate and set
 * up the pseudo ("defunct") subchannel with its lock, init the css mutex,
 * name the css device and generate its path-group id.  Several error
 * returns and field assignments are in extraction gaps. */
630 static int __init setup_css(int nr)
634 struct channel_subsystem *css;
636 css = channel_subsystems[nr];
637 memset(css, 0, sizeof(struct channel_subsystem));
638 css->pseudo_subchannel =
639 kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
641 if (!css->pseudo_subchannel)
642 css->pseudo_subchannel->dev.parent = &css->device;
643 css->pseudo_subchannel->dev.release = css_subchannel_release;
644 sprintf(css->pseudo_subchannel->dev.bus_id, "defunct");
645 ret = cio_create_sch_lock(css->pseudo_subchannel);
647 kfree(css->pseudo_subchannel);
650 mutex_init(&css->mutex);
653 sprintf(css->device.bus_id, "css%x", nr);
654 css->device.release = channel_subsystem_release;
655 tod_high = (u32) (get_clock() >> 32);
656 css_generate_pgid(css, tod_high);
/* Reboot notifier: walk all channel subsystems and disable channel
 * measurement (chsc_secm(css, 0)) before reboot; presumably only when
 * cm_enabled — that check is in a gap. */
660 static int css_reboot_event(struct notifier_block *this,
667 for (i = 0; i <= __MAX_CSSID; i++) {
668 struct channel_subsystem *css;
670 css = channel_subsystems[i];
672 if (chsc_secm(css, 0))
/* Notifier block registered in init_channel_subsystem(). */
679 static struct notifier_block css_reboot_notifier = {
680 .notifier_call = css_reboot_event,
684 * Now that the driver core is running, we can setup our channel subsystem.
685 * The struct subchannel's are created during probing (except for the
686 * static console subchannel).
/* Boot-time initialization: determine CHSC characteristics, allocate the
 * SEI area, set up the slow path, register the bus, try to enable MSS
 * (raising max_ssid on success), create and register every css with its
 * pseudo subchannel and optional cm_enable attribute, register the reboot
 * notifier, and finally scan all subchannels.
 * NOTE(review): the goto labels of the error-unwind ladder (L322-L338) are
 * in extraction gaps; the visible cleanup statements undo the setup steps
 * in reverse order. */
689 init_channel_subsystem (void)
693 ret = chsc_determine_css_characteristics();
695 goto out; /* No need to continue. */
697 css_characteristics_avail = 1;
699 ret = chsc_alloc_sei_area();
703 ret = slow_subchannel_init();
707 if ((ret = bus_register(&css_bus_type)))
710 /* Try to enable MSS. */
711 ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
713 case 0: /* Success. */
714 max_ssid = __MAX_SSID;
721 /* Setup css structure. */
722 for (i = 0; i <= __MAX_CSSID; i++) {
723 struct channel_subsystem *css;
725 css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
730 channel_subsystems[i] = css;
734 ret = device_register(&css->device);
737 if (css_characteristics_avail &&
738 css_chsc_characteristics.secm) {
739 ret = device_create_file(&css->device,
740 &dev_attr_cm_enable);
744 ret = device_register(&css->pseudo_subchannel->dev);
748 ret = register_reboot_notifier(&css_reboot_notifier);
755 for_each_subchannel(__init_channel_subsystem, NULL);
758 device_unregister(&channel_subsystems[i]->pseudo_subchannel->dev);
760 device_remove_file(&channel_subsystems[i]->device,
761 &dev_attr_cm_enable);
763 device_unregister(&channel_subsystems[i]->device);
765 kfree(channel_subsystems[i]->pseudo_subchannel->lock);
766 kfree(channel_subsystems[i]->pseudo_subchannel);
768 kfree(channel_subsystems[i]);
771 struct channel_subsystem *css;
774 css = channel_subsystems[i];
775 device_unregister(&css->pseudo_subchannel->dev);
776 if (css_characteristics_avail && css_chsc_characteristics.secm)
777 device_remove_file(&css->device,
778 &dev_attr_cm_enable);
779 device_unregister(&css->device);
782 bus_unregister(&css_bus_type);
784 chsc_free_sei_area();
785 kfree(slow_subchannel_set);
786 printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n",
791 int sch_is_pseudo_sch(struct subchannel *sch)
793 return sch == to_css(sch->dev.parent)->pseudo_subchannel;
797 * find a driver for a subchannel. They identify by the subchannel
798 * type with the exception that the console subchannel driver has its own
799 * subchannel type although the device is an i/o subchannel
/* Bus match callback: a driver matches when its subchannel_type equals the
 * device's st field (the return statements are in extraction gaps). */
802 css_bus_match (struct device *dev, struct device_driver *drv)
804 struct subchannel *sch = to_subchannel(dev);
805 struct css_driver *driver = to_cssdriver(drv);
807 if (sch->st == driver->subchannel_type)
/* Bus probe callback: bind the matched css driver to the subchannel and
 * invoke its probe() hook if present (0 otherwise). */
813 static int css_probe(struct device *dev)
815 struct subchannel *sch;
818 sch = to_subchannel(dev);
819 sch->driver = to_cssdriver(dev->driver);
820 ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
/* Bus remove callback: invoke the driver's remove() hook if present
 * (0 otherwise); the driver-pointer reset, if any, is in a gap. */
826 static int css_remove(struct device *dev)
828 struct subchannel *sch;
831 sch = to_subchannel(dev);
832 ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
837 static void css_shutdown(struct device *dev)
839 struct subchannel *sch;
841 sch = to_subchannel(dev);
842 if (sch->driver && sch->driver->shutdown)
843 sch->driver->shutdown(sch);
/* The css bus: ties subchannel devices to css drivers via the callbacks
 * above (the .name and .probe initializers are in extraction gaps). */
846 struct bus_type css_bus_type = {
848 .match = css_bus_match,
850 .remove = css_remove,
851 .shutdown = css_shutdown,
855 * css_driver_register - register a css driver
856 * @cdrv: css driver to register
858 * This is mainly a wrapper around driver_register that sets name
859 * and bus_type in the embedded struct device_driver correctly.
861 int css_driver_register(struct css_driver *cdrv)
863 cdrv->drv.name = cdrv->name;
864 cdrv->drv.bus = &css_bus_type;
865 cdrv->drv.owner = cdrv->owner;
866 return driver_register(&cdrv->drv);
868 EXPORT_SYMBOL_GPL(css_driver_register);
871 * css_driver_unregister - unregister a css driver
872 * @cdrv: css driver to unregister
874 * This is a wrapper around driver_unregister.
876 void css_driver_unregister(struct css_driver *cdrv)
878 driver_unregister(&cdrv->drv);
880 EXPORT_SYMBOL_GPL(css_driver_unregister);
/* Run init_channel_subsystem once the driver core is up, and export the
 * bus type / characteristics flag for the ccw and chsc layers. */
882 subsys_initcall(init_channel_subsystem);
884 MODULE_LICENSE("GPL");
885 EXPORT_SYMBOL(css_bus_type);
886 EXPORT_SYMBOL_GPL(css_characteristics_avail);