/*
 *  drivers/s390/cio/device.c
 *  bus driver for ccw devices
 *
 *    Copyright IBM Corp. 2002,2008
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/timer.h>

#include <asm/ccwdev.h>
#include <asm/param.h>		/* HZ */

#include "cio_debug.h"
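
/*
 * State for the path-recovery mechanism implemented further down: a timer
 * that re-triggers recovery of disconnected devices after increasingly long
 * delays (3, 30 and 300 seconds, see recovery_delay[]).
 */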
static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel system per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
ccw_bus_match (struct device * dev, struct device_driver * drv)
        struct ccw_device *cdev = to_ccwdev(dev);
        struct ccw_driver *cdrv = to_ccwdrv(drv);
        const struct ccw_device_id *ids = cdrv->ids, *found;

        found = ccw_device_id_match(ids, &cdev->id);

        cdev->id.driver_info = found->driver_info;

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
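/* For example, a control unit of type 0x3990, model 0xE9, with an attached
 * device of type 0x3390, model 0x0C (illustrative values only) yields the
 * alias "ccw:t3990mE9dt3390dm0C". */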
static int snprint_alias(char *buf, size_t size,
                         struct ccw_device_id *id, const char *suffix)
        len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);

        if (id->dev_type != 0)
                len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
                                id->dev_model, suffix);
                len += snprintf(buf, size, "dtdm%s", suffix);

/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
        struct ccw_device *cdev = to_ccwdev(dev);
        struct ccw_device_id *id = &(cdev->id);
        char modalias_buf[30];

        ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
        ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
        /* The next two can be zero, that's ok for us */
        ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
        ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
        snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
        ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);

struct bus_type ccw_bus_type;

static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, void *, int);
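
/* Hooks through which the common css code drives I/O subchannels. */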
static struct css_driver io_subchannel_driver = {
        .owner = THIS_MODULE,
        .subchannel_type = SUBCHANNEL_TYPE_IO,
        .name = "io_subchannel",
        .irq = io_subchannel_irq,
        .sch_event = io_subchannel_sch_event,
        .chp_event = io_subchannel_chp_event,
        .probe = io_subchannel_probe,
        .remove = io_subchannel_remove,
        .shutdown = io_subchannel_shutdown,

struct workqueue_struct *ccw_device_work;
struct workqueue_struct *ccw_device_notify_work;
wait_queue_head_t ccw_device_init_wq;
atomic_t ccw_device_init_count;

static void recovery_func(unsigned long data);
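
/*
 * Infrastructure setup at boot: create the cio workqueues, register the
 * ccw bus type and hook the io_subchannel driver into the css layer.
 */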
init_ccw_bus_type (void)
        init_waitqueue_head(&ccw_device_init_wq);
        atomic_set(&ccw_device_init_count, 0);
        setup_timer(&recovery_timer, recovery_func, 0);

        ccw_device_work = create_singlethread_workqueue("cio");
        if (!ccw_device_work)
                return -ENOMEM; /* FIXME: better errno ? */
        ccw_device_notify_work = create_singlethread_workqueue("cio_notify");
        if (!ccw_device_notify_work) {
                ret = -ENOMEM; /* FIXME: better errno ? */
        slow_path_wq = create_singlethread_workqueue("kslowcrw");
                ret = -ENOMEM; /* FIXME: better errno ? */
        if ((ret = bus_register (&ccw_bus_type)))
        ret = css_driver_register(&io_subchannel_driver);

        wait_event(ccw_device_init_wq,
                   atomic_read(&ccw_device_init_count) == 0);
        flush_workqueue(ccw_device_work);
        destroy_workqueue(ccw_device_work);
        if (ccw_device_notify_work)
                destroy_workqueue(ccw_device_notify_work);
        destroy_workqueue(slow_path_wq);

cleanup_ccw_bus_type (void)
        css_driver_unregister(&io_subchannel_driver);
        bus_unregister(&ccw_bus_type);
        destroy_workqueue(ccw_device_notify_work);
        destroy_workqueue(ccw_device_work);

subsys_initcall(init_ccw_bus_type);
module_exit(cleanup_ccw_bus_type);
/************************ device handling **************************/

/*
 * A ccw_device has some interfaces in sysfs in addition to the
 * standard ones.
 * The following entries are designed to export the information which
 * resided in 2.4 in /proc/subchannels. Subchannel and device number
 * are obvious, so they don't have an entry :)
 * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
 */
chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
        struct subchannel *sch = to_subchannel(dev);
        struct chsc_ssd_info *ssd = &sch->ssd_info;

        for (chp = 0; chp < 8; chp++) {
                if (ssd->path_mask & mask)
                        ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
                        ret += sprintf(buf + ret, "00 ");
        ret += sprintf (buf+ret, "\n");
        return min((ssize_t)PAGE_SIZE, ret);

pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf)
        struct subchannel *sch = to_subchannel(dev);
        struct pmcw *pmcw = &sch->schib.pmcw;

        return sprintf (buf, "%02x %02x %02x\n",
                        pmcw->pim, pmcw->pam, pmcw->pom);

devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
        struct ccw_device *cdev = to_ccwdev(dev);
        struct ccw_device_id *id = &(cdev->id);

        if (id->dev_type != 0)
                return sprintf(buf, "%04x/%02x\n",
                               id->dev_type, id->dev_model);
        return sprintf(buf, "n/a\n");

cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
        struct ccw_device *cdev = to_ccwdev(dev);
        struct ccw_device_id *id = &(cdev->id);

        return sprintf(buf, "%04x/%02x\n",
                       id->cu_type, id->cu_model);

modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
        struct ccw_device *cdev = to_ccwdev(dev);
        struct ccw_device_id *id = &(cdev->id);

        len = snprint_alias(buf, PAGE_SIZE, id, "\n");
        return len > PAGE_SIZE ? PAGE_SIZE : len;

online_show (struct device *dev, struct device_attribute *attr, char *buf)
        struct ccw_device *cdev = to_ccwdev(dev);

        return sprintf(buf, cdev->online ? "1\n" : "0\n");
int ccw_device_is_orphan(struct ccw_device *cdev)
        return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));

static void ccw_device_unregister(struct ccw_device *cdev)
        if (test_and_clear_bit(1, &cdev->private->registered))
                device_del(&cdev->dev);

static void ccw_device_remove_orphan_cb(struct device *dev)
        struct ccw_device *cdev = to_ccwdev(dev);

        ccw_device_unregister(cdev);
        put_device(&cdev->dev);

static void ccw_device_remove_sch_cb(struct device *dev)
        struct subchannel *sch;

        sch = to_subchannel(dev);
        css_sch_device_unregister(sch);
        /* Reset intparm to zeroes. */
        sch->schib.pmcw.intparm = 0;
        put_device(&sch->dev);
ccw_device_remove_disconnected(struct ccw_device *cdev)
        /*
         * Forced offline in disconnected state means
         * 'throw away device'.
         */
        if (ccw_device_is_orphan(cdev)) {
                 * Deregister ccw device.
                 * Unfortunately, we cannot do this directly from the
                spin_lock_irqsave(cdev->ccwlock, flags);
                cdev->private->state = DEV_STATE_NOT_OPER;
                spin_unlock_irqrestore(cdev->ccwlock, flags);
                rc = device_schedule_callback(&cdev->dev,
                                              ccw_device_remove_orphan_cb);
                CIO_MSG_EVENT(0, "Couldn't unregister orphan "
                              cdev->private->dev_id.ssid,
                              cdev->private->dev_id.devno);
        /* Deregister subchannel, which will kill the ccw device. */
        rc = device_schedule_callback(cdev->dev.parent,
                                      ccw_device_remove_sch_cb);
        CIO_MSG_EVENT(0, "Couldn't unregister disconnected device "
                      cdev->private->dev_id.ssid,
                      cdev->private->dev_id.devno);
/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *   enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
        if (!cdev->online || !cdev->drv)

        if (cdev->drv->set_offline) {
                ret = cdev->drv->set_offline(cdev);

        spin_lock_irq(cdev->ccwlock);
        ret = ccw_device_offline(cdev);
        if (ret == -ENODEV) {
                if (cdev->private->state != DEV_STATE_NOT_OPER) {
                        cdev->private->state = DEV_STATE_OFFLINE;
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                spin_unlock_irq(cdev->ccwlock);
        spin_unlock_irq(cdev->ccwlock);
        wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
        CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
                      "device 0.%x.%04x\n",
                      ret, cdev->private->dev_id.ssid,
                      cdev->private->dev_id.devno);
/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *   enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
        if (cdev->online || !cdev->drv)

        spin_lock_irq(cdev->ccwlock);
        ret = ccw_device_online(cdev);
        spin_unlock_irq(cdev->ccwlock);
        wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
        CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
                      "device 0.%x.%04x\n",
                      ret, cdev->private->dev_id.ssid,
                      cdev->private->dev_id.devno);
        if (cdev->private->state != DEV_STATE_ONLINE)
        if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) {
        spin_lock_irq(cdev->ccwlock);
        ret = ccw_device_offline(cdev);
        spin_unlock_irq(cdev->ccwlock);
        wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
        CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
                      "device 0.%x.%04x\n",
                      ret, cdev->private->dev_id.ssid,
                      cdev->private->dev_id.devno);
        return (ret == 0) ? -ENODEV : ret;
static void online_store_handle_offline(struct ccw_device *cdev)
        if (cdev->private->state == DEV_STATE_DISCONNECTED)
                ccw_device_remove_disconnected(cdev);
        else if (cdev->drv && cdev->drv->set_offline)
                ccw_device_set_offline(cdev);

static int online_store_recog_and_online(struct ccw_device *cdev)
        /* Do device recognition, if needed. */
        if (cdev->id.cu_type == 0) {
                ret = ccw_device_recognition(cdev);
                CIO_MSG_EVENT(0, "Couldn't start recognition "
                              "for device 0.%x.%04x (ret=%d)\n",
                              cdev->private->dev_id.ssid,
                              cdev->private->dev_id.devno, ret);
                wait_event(cdev->private->wait_q,
                           cdev->private->flags.recog_done);
        if (cdev->drv && cdev->drv->set_online)
                ccw_device_set_online(cdev);

static void online_store_handle_online(struct ccw_device *cdev, int force)
        ret = online_store_recog_and_online(cdev);
        if (force && cdev->private->state == DEV_STATE_BOXED) {
                ret = ccw_device_stlck(cdev);
                              "ccw_device_stlck returned %d!\n", ret);
                if (cdev->id.cu_type == 0)
                        cdev->private->state = DEV_STATE_NOT_OPER;
                online_store_recog_and_online(cdev);
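
/*
 * sysfs store method for the "online" attribute: "0" takes the device
 * offline, "1" brings it online, and "force" additionally tries to unbox
 * a boxed device via ccw_device_stlck() before bringing it online.
 */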
static ssize_t online_store (struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
        struct ccw_device *cdev = to_ccwdev(dev);

        if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)

        if (cdev->drv && !try_module_get(cdev->drv->owner)) {
                atomic_set(&cdev->private->onoff, 0);
        if (!strncmp(buf, "force\n", count)) {
        ret = strict_strtoul(buf, 16, &i);
                online_store_handle_offline(cdev);
                online_store_handle_online(cdev, force);
        module_put(cdev->drv->owner);
        atomic_set(&cdev->private->onoff, 0);

available_show (struct device *dev, struct device_attribute *attr, char *buf)
        struct ccw_device *cdev = to_ccwdev(dev);
        struct subchannel *sch;

        if (ccw_device_is_orphan(cdev))
                return sprintf(buf, "no device\n");
        switch (cdev->private->state) {
        case DEV_STATE_BOXED:
                return sprintf(buf, "boxed\n");
        case DEV_STATE_DISCONNECTED:
        case DEV_STATE_DISCONNECTED_SENSE_ID:
        case DEV_STATE_NOT_OPER:
                sch = to_subchannel(dev->parent);
                        return sprintf(buf, "no path\n");
                        return sprintf(buf, "no device\n");
                /* All other states considered fine. */
                return sprintf(buf, "good\n");
static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);

static struct attribute *io_subchannel_attrs[] = {
        &dev_attr_chpids.attr,
        &dev_attr_pimpampom.attr,

static struct attribute_group io_subchannel_attr_group = {
        .attrs = io_subchannel_attrs,

static struct attribute * ccwdev_attrs[] = {
        &dev_attr_devtype.attr,
        &dev_attr_cutype.attr,
        &dev_attr_modalias.attr,
        &dev_attr_online.attr,
        &dev_attr_cmb_enable.attr,
        &dev_attr_availability.attr,

static struct attribute_group ccwdev_attr_group = {
        .attrs = ccwdev_attrs,

static struct attribute_group *ccwdev_attr_groups[] = {

/* this is a simple abstraction for device_register that sets the
 * correct bus type and adds the bus specific files */
static int ccw_device_register(struct ccw_device *cdev)
        struct device *dev = &cdev->dev;

        dev->bus = &ccw_bus_type;
        if ((ret = device_add(dev)))
        set_bit(1, &cdev->private->registered);

        struct ccw_dev_id dev_id;
        struct ccw_device * sibling;
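
/* bus_find_device() callback: find a disconnected ccw device with the given
 * dev_id, skipping the @sibling device passed in via struct match_data. */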
match_devno(struct device * dev, void * data)
        struct match_data * d = data;
        struct ccw_device * cdev;

        cdev = to_ccwdev(dev);
        if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
            !ccw_device_is_orphan(cdev) &&
            ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
            (cdev != d->sibling))

static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
                                                     struct ccw_device *sibling)
        struct match_data data;

        data.dev_id = *dev_id;
        data.sibling = sibling;
        dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);

        return dev ? to_ccwdev(dev) : NULL;

static int match_orphan(struct device *dev, void *data)
        struct ccw_dev_id *dev_id;
        struct ccw_device *cdev;

        cdev = to_ccwdev(dev);
        return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);

static struct ccw_device *
get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
                              struct ccw_dev_id *dev_id)
        dev = device_find_child(&css->pseudo_subchannel->dev, dev_id,

        return dev ? to_ccwdev(dev) : NULL;

ccw_device_add_changed(struct work_struct *work)
        struct ccw_device_private *priv;
        struct ccw_device *cdev;

        priv = container_of(work, struct ccw_device_private, kick_work);
        if (device_add(&cdev->dev)) {
                put_device(&cdev->dev);
        set_bit(1, &cdev->private->registered);
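
/* Worker: unregister a ccw device whose identity changed and register it
 * again (via ccw_device_add_changed) from process context. */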
void ccw_device_do_unreg_rereg(struct work_struct *work)
        struct ccw_device_private *priv;
        struct ccw_device *cdev;
        struct subchannel *sch;

        priv = container_of(work, struct ccw_device_private, kick_work);
        sch = to_subchannel(cdev->dev.parent);

        ccw_device_unregister(cdev);
        PREPARE_WORK(&cdev->private->kick_work,
                     ccw_device_add_changed);
        queue_work(ccw_device_work, &cdev->private->kick_work);

ccw_device_release(struct device *dev)
        struct ccw_device *cdev;

        cdev = to_ccwdev(dev);
        kfree(cdev->private);

static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
        struct ccw_device *cdev;

        cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
        cdev->private = kzalloc(sizeof(struct ccw_device_private),
                                GFP_KERNEL | GFP_DMA);
        return ERR_PTR(-ENOMEM);

static int io_subchannel_initialize_dev(struct subchannel *sch,
                                        struct ccw_device *cdev)
        cdev->private->cdev = cdev;
        atomic_set(&cdev->private->onoff, 0);
        cdev->dev.parent = &sch->dev;
        cdev->dev.release = ccw_device_release;
        INIT_WORK(&cdev->private->kick_work, NULL);
        cdev->dev.groups = ccwdev_attr_groups;
        /* Do first half of device_register. */
        device_initialize(&cdev->dev);
        if (!get_device(&sch->dev)) {
                if (cdev->dev.release)
                        cdev->dev.release(&cdev->dev);

static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
        struct ccw_device *cdev;

        cdev = io_subchannel_allocate_dev(sch);
        ret = io_subchannel_initialize_dev(sch, cdev);

static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
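
/* Bind an existing ccw device to a (new) subchannel and trigger reprobing
 * of the device behind it. */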
static void sch_attach_device(struct subchannel *sch,
                              struct ccw_device *cdev)
        css_update_ssd_info(sch);
        spin_lock_irq(sch->lock);
        sch_set_cdev(sch, cdev);
        cdev->private->schid = sch->schid;
        cdev->ccwlock = sch->lock;
        ccw_device_trigger_reprobe(cdev);
        spin_unlock_irq(sch->lock);

static void sch_attach_disconnected_device(struct subchannel *sch,
                                           struct ccw_device *cdev)
        struct subchannel *other_sch;

        other_sch = to_subchannel(get_device(cdev->dev.parent));
        ret = device_move(&cdev->dev, &sch->dev);
        CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
                      "(ret=%d)!\n", cdev->private->dev_id.ssid,
                      cdev->private->dev_id.devno, ret);
        put_device(&other_sch->dev);
        sch_set_cdev(other_sch, NULL);
        /* No need to keep a subchannel without ccw device around. */
        css_sch_device_unregister(other_sch);
        put_device(&other_sch->dev);
        sch_attach_device(sch, cdev);

static void sch_attach_orphaned_device(struct subchannel *sch,
                                       struct ccw_device *cdev)
        /* Try to move the ccw device to its new subchannel. */
        ret = device_move(&cdev->dev, &sch->dev);
        CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
                      "failed (ret=%d)!\n",
                      cdev->private->dev_id.ssid,
                      cdev->private->dev_id.devno, ret);
        sch_attach_device(sch, cdev);

static void sch_create_and_recog_new_device(struct subchannel *sch)
        struct ccw_device *cdev;

        /* Need to allocate a new ccw device. */
        cdev = io_subchannel_create_ccwdev(sch);
        /* OK, we did everything we could... */
        css_sch_device_unregister(sch);
        spin_lock_irq(sch->lock);
        sch_set_cdev(sch, cdev);
        spin_unlock_irq(sch->lock);
        /* Start recognition for the new ccw device. */
        if (io_subchannel_recog(cdev, sch)) {
                spin_lock_irq(sch->lock);
                sch_set_cdev(sch, NULL);
                spin_unlock_irq(sch->lock);
                if (cdev->dev.release)
                        cdev->dev.release(&cdev->dev);
                css_sch_device_unregister(sch);
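
/*
 * The device number of the subchannel has changed: park the no longer
 * matching ccw device in the orphanage and attach the device that now
 * belongs here (a disconnected device, an orphan, or a newly created one).
 */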
void ccw_device_move_to_orphanage(struct work_struct *work)
        struct ccw_device_private *priv;
        struct ccw_device *cdev;
        struct ccw_device *replacing_cdev;
        struct subchannel *sch;
        struct channel_subsystem *css;
        struct ccw_dev_id dev_id;

        priv = container_of(work, struct ccw_device_private, kick_work);
        sch = to_subchannel(cdev->dev.parent);
        css = to_css(sch->dev.parent);
        dev_id.devno = sch->schib.pmcw.dev;
        dev_id.ssid = sch->schid.ssid;

        /*
         * Move the orphaned ccw device to the orphanage so the replacing
         * ccw device can take its place on the subchannel.
         */
        ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev);
        CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
                      "(ret=%d)!\n", cdev->private->dev_id.ssid,
                      cdev->private->dev_id.devno, ret);
        cdev->ccwlock = css->pseudo_subchannel->lock;
        /*
         * Search for the replacing ccw device
         * - among the disconnected devices
         * - in the orphanage
         */
        replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
        if (replacing_cdev) {
                sch_attach_disconnected_device(sch, replacing_cdev);
        replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
        if (replacing_cdev) {
                sch_attach_orphaned_device(sch, replacing_cdev);
        sch_create_and_recog_new_device(sch);
/*
 * Register recognized device.
 */
io_subchannel_register(struct work_struct *work)
        struct ccw_device_private *priv;
        struct ccw_device *cdev;
        struct subchannel *sch;

        priv = container_of(work, struct ccw_device_private, kick_work);
        sch = to_subchannel(cdev->dev.parent);
        css_update_ssd_info(sch);
        /*
         * io_subchannel_register() will also be called after device
         * recognition has been done for a boxed device (which will already
         * be registered). We need to reprobe since we may now have sense id
         * information.
         */
        if (klist_node_attached(&cdev->dev.knode_parent)) {
                ret = device_reprobe(&cdev->dev);
                        /* We can't do much here. */
                        CIO_MSG_EVENT(0, "device_reprobe() returned"
                                      " %d for 0.%x.%04x\n", ret,
                                      cdev->private->dev_id.ssid,
                                      cdev->private->dev_id.devno);
        /*
         * Now we know this subchannel will stay, we can throw
         * our delayed uevent.
         */
        sch->dev.uevent_suppress = 0;
        kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        /* make it known to the system */
        ret = ccw_device_register(cdev);
        CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
                      cdev->private->dev_id.ssid,
                      cdev->private->dev_id.devno, ret);
        put_device(&cdev->dev);
        spin_lock_irqsave(sch->lock, flags);
        sch_set_cdev(sch, NULL);
        spin_unlock_irqrestore(sch->lock, flags);
        kfree (cdev->private);
        put_device(&sch->dev);
        if (atomic_dec_and_test(&ccw_device_init_count))
                wake_up(&ccw_device_init_wq);
        put_device(&cdev->dev);
        cdev->private->flags.recog_done = 1;
        put_device(&sch->dev);
        wake_up(&cdev->private->wait_q);
        if (atomic_dec_and_test(&ccw_device_init_count))
                wake_up(&ccw_device_init_wq);
static void ccw_device_call_sch_unregister(struct work_struct *work)
        struct ccw_device_private *priv;
        struct ccw_device *cdev;
        struct subchannel *sch;

        priv = container_of(work, struct ccw_device_private, kick_work);
        sch = to_subchannel(cdev->dev.parent);
        css_sch_device_unregister(sch);
        /* Reset intparm to zeroes. */
        sch->schib.pmcw.intparm = 0;
        put_device(&cdev->dev);
        put_device(&sch->dev);

/*
 * subchannel recognition done. Called from the state machine.
 */
io_subchannel_recog_done(struct ccw_device *cdev)
        struct subchannel *sch;

        if (css_init_done == 0) {
                cdev->private->flags.recog_done = 1;
        switch (cdev->private->state) {
        case DEV_STATE_NOT_OPER:
                cdev->private->flags.recog_done = 1;
                /* Remove device found not operational. */
                if (!get_device(&cdev->dev))
                sch = to_subchannel(cdev->dev.parent);
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_call_sch_unregister);
                queue_work(slow_path_wq, &cdev->private->kick_work);
                if (atomic_dec_and_test(&ccw_device_init_count))
                        wake_up(&ccw_device_init_wq);
        case DEV_STATE_BOXED:
                /* Device did not respond in time. */
        case DEV_STATE_OFFLINE:
                /*
                 * We can't register the device in interrupt context so
                 * we schedule a work item.
                 */
                if (!get_device(&cdev->dev))
                PREPARE_WORK(&cdev->private->kick_work,
                             io_subchannel_register);
                queue_work(slow_path_wq, &cdev->private->kick_work);
io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
        struct ccw_device_private *priv;

        sch_set_cdev(sch, cdev);
        cdev->ccwlock = sch->lock;

        /* Init private data. */
        priv = cdev->private;
        priv->dev_id.devno = sch->schib.pmcw.dev;
        priv->dev_id.ssid = sch->schid.ssid;
        priv->schid = sch->schid;
        priv->state = DEV_STATE_NOT_OPER;
        INIT_LIST_HEAD(&priv->cmb_list);
        init_waitqueue_head(&priv->wait_q);
        init_timer(&priv->timer);

        /* Set an initial name for the device. */
        snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
                  sch->schid.ssid, sch->schib.pmcw.dev);

        /* Increase counter of devices currently in recognition. */
        atomic_inc(&ccw_device_init_count);

        /* Start async. device sensing. */
        spin_lock_irq(sch->lock);
        rc = ccw_device_recognition(cdev);
        spin_unlock_irq(sch->lock);

        if (atomic_dec_and_test(&ccw_device_init_count))
                wake_up(&ccw_device_init_wq);
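
/* Worker: move a ccw device from its former parent (orphanage or old
 * subchannel) to the subchannel it now belongs to. */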
static void ccw_device_move_to_sch(struct work_struct *work)
        struct ccw_device_private *priv;
        struct subchannel *sch;
        struct ccw_device *cdev;
        struct subchannel *former_parent;

        priv = container_of(work, struct ccw_device_private, kick_work);
        former_parent = ccw_device_is_orphan(cdev) ?
                NULL : to_subchannel(get_device(cdev->dev.parent));
        mutex_lock(&sch->reg_mutex);
        /* Try to move the ccw device to its new subchannel. */
        rc = device_move(&cdev->dev, &sch->dev);
        mutex_unlock(&sch->reg_mutex);
        CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel "
                      "0.%x.%04x failed (ret=%d)!\n",
                      cdev->private->dev_id.ssid,
                      cdev->private->dev_id.devno, sch->schid.ssid,
                      sch->schid.sch_no, rc);
        css_sch_device_unregister(sch);
        if (former_parent) {
                spin_lock_irq(former_parent->lock);
                sch_set_cdev(former_parent, NULL);
                spin_unlock_irq(former_parent->lock);
                css_sch_device_unregister(former_parent);
                /* Reset intparm to zeroes. */
                former_parent->schib.pmcw.intparm = 0;
                cio_modify(former_parent);
        sch_attach_device(sch, cdev);
        put_device(&former_parent->dev);
        put_device(&cdev->dev);

static void io_subchannel_irq(struct subchannel *sch)
        struct ccw_device *cdev;

        cdev = sch_get_cdev(sch);

        CIO_TRACE_EVENT(3, "IRQ");
        CIO_TRACE_EVENT(3, sch->dev.bus_id);
        dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
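
/* Derive the initial path masks and pmcw settings for a newly sensed I/O
 * subchannel. */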
static void io_subchannel_init_fields(struct subchannel *sch)
        if (cio_is_console(sch->schid))
        sch->opm = chp_get_sch_opm(sch);
        sch->lpm = sch->schib.pmcw.pam & sch->opm;
        sch->isc = cio_is_console(sch->schid) ? 1 : 3;

        CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
                      " - PIM = %02X, PAM = %02X, POM = %02X\n",
                      sch->schib.pmcw.dev, sch->schid.ssid,
                      sch->schid.sch_no, sch->schib.pmcw.pim,
                      sch->schib.pmcw.pam, sch->schib.pmcw.pom);
        /* Initially set up some fields in the pmcw. */
        sch->schib.pmcw.ena = 0;
        sch->schib.pmcw.csense = 1;	/* concurrent sense */
        if ((sch->lpm & (sch->lpm - 1)) != 0)
                sch->schib.pmcw.mp = 1;	/* multipath mode */
        /* clean up possible residual cmf stuff */
        sch->schib.pmcw.mme = 0;
        sch->schib.pmcw.mbfc = 0;
        sch->schib.pmcw.mbi = 0;
static int io_subchannel_probe(struct subchannel *sch)
        struct ccw_device *cdev;
        unsigned long flags;
        struct ccw_dev_id dev_id;

        cdev = sch_get_cdev(sch);
        rc = sysfs_create_group(&sch->dev.kobj,
                                &io_subchannel_attr_group);
                CIO_MSG_EVENT(0, "Failed to create io subchannel "
                              "attributes for subchannel "
                              "0.%x.%04x (rc=%d)\n",
                              sch->schid.ssid, sch->schid.sch_no, rc);
        /*
         * This subchannel already has an associated ccw_device.
         * Throw the delayed uevent for the subchannel, register
         * the ccw_device and exit. This happens for all early
         * devices, e.g. the console.
         */
        sch->dev.uevent_suppress = 0;
        kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        cdev->dev.groups = ccwdev_attr_groups;
        device_initialize(&cdev->dev);
        ccw_device_register(cdev);
         * Check if the device is already online. If it is
         * the reference count needs to be corrected
         * (see ccw_device_online and css_init_done for the
        if (cdev->private->state != DEV_STATE_NOT_OPER &&
            cdev->private->state != DEV_STATE_OFFLINE &&
            cdev->private->state != DEV_STATE_BOXED)
                get_device(&cdev->dev);
        io_subchannel_init_fields(sch);
        /*
         * First check if a fitting device may be found amongst the
         * disconnected devices or in the orphanage.
         */
        dev_id.devno = sch->schib.pmcw.dev;
        dev_id.ssid = sch->schid.ssid;
        rc = sysfs_create_group(&sch->dev.kobj,
                                &io_subchannel_attr_group);
        /* Allocate I/O subchannel private data. */
        sch->private = kzalloc(sizeof(struct io_subchannel_private),
                               GFP_KERNEL | GFP_DMA);
        if (!sch->private) {
        cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
                cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
        /*
         * Schedule moving the device until we have a registered
         * subchannel to move to and succeed the probe. We can
         * unregister later again, when the probe is through.
         */
        cdev->private->sch = sch;
        PREPARE_WORK(&cdev->private->kick_work,
                     ccw_device_move_to_sch);
        queue_work(slow_path_wq, &cdev->private->kick_work);
        cdev = io_subchannel_create_ccwdev(sch);
        rc = io_subchannel_recog(cdev, sch);
                spin_lock_irqsave(sch->lock, flags);
                sch_set_cdev(sch, NULL);
                spin_unlock_irqrestore(sch->lock, flags);
                if (cdev->dev.release)
                        cdev->dev.release(&cdev->dev);
        kfree(sch->private);
        sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
io_subchannel_remove (struct subchannel *sch)
        struct ccw_device *cdev;
        unsigned long flags;

        cdev = sch_get_cdev(sch);

        /* Set ccw device to not operational and drop reference. */
        spin_lock_irqsave(cdev->ccwlock, flags);
        sch_set_cdev(sch, NULL);
        cdev->private->state = DEV_STATE_NOT_OPER;
        spin_unlock_irqrestore(cdev->ccwlock, flags);
        ccw_device_unregister(cdev);
        put_device(&cdev->dev);
        kfree(sch->private);
        sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);

static int io_subchannel_notify(struct subchannel *sch, int event)
        struct ccw_device *cdev;

        cdev = sch_get_cdev(sch);
        return ccw_device_notify(cdev, event);

static void io_subchannel_verify(struct subchannel *sch)
        struct ccw_device *cdev;

        cdev = sch_get_cdev(sch);
        dev_fsm_event(cdev, DEV_EVENT_VERIFY);
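
/* Check whether the subchannel is currently busy with I/O on the path(s)
 * selected by @mask. */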
static int check_for_io_on_path(struct subchannel *sch, int mask)
        cc = stsch(sch->schid, &sch->schib);
        if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)

static void terminate_internal_io(struct subchannel *sch,
                                  struct ccw_device *cdev)
        if (cio_clear(sch)) {
                /* Recheck device in case clear failed. */
                dev_fsm_event(cdev, DEV_EVENT_VERIFY);
                css_schedule_eval(sch->schid);
        cdev->private->state = DEV_STATE_CLEAR_VERIFY;
        /* Request retry of internal operation. */
        cdev->private->flags.intretry = 1;
        cdev->handler(cdev, cdev->private->intparm,

static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
        struct ccw_device *cdev;

        cdev = sch_get_cdev(sch);
        if (check_for_io_on_path(sch, mask)) {
                if (cdev->private->state == DEV_STATE_ONLINE)
                        ccw_device_kill_io(cdev);
                        terminate_internal_io(sch, cdev);
                        /* Re-start path verification. */
                        dev_fsm_event(cdev, DEV_EVENT_VERIFY);
                /* trigger path verification. */
                dev_fsm_event(cdev, DEV_EVENT_VERIFY);

static int io_subchannel_chp_event(struct subchannel *sch, void *data,
        struct res_acc_data *res_data;

        mask = chp_ssd_get_mask(&sch->ssd_info, res_data);
                io_subchannel_terminate_path(sch, mask);
                io_subchannel_verify(sch);
                if (stsch(sch->schid, &sch->schib))
                if (!css_sch_is_valid(&sch->schib))
                io_subchannel_terminate_path(sch, mask);
                if (stsch(sch->schid, &sch->schib))
                sch->lpm |= mask & sch->opm;
                io_subchannel_verify(sch);
io_subchannel_shutdown(struct subchannel *sch)
        struct ccw_device *cdev;

        cdev = sch_get_cdev(sch);
        if (cio_is_console(sch->schid))
        if (!sch->schib.pmcw.ena)
                /* Nothing to do. */
        ret = cio_disable_subchannel(sch);
        /* Subchannel is disabled, we're done. */
        cdev->private->state = DEV_STATE_QUIESCE;
        cdev->handler(cdev, cdev->private->intparm,
        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                ccw_device_set_timeout(cdev, HZ/10);
                wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
        cio_disable_subchannel(sch);

static int io_subchannel_get_status(struct subchannel *sch)
        if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
        if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
                return CIO_REVALIDATE;

static int device_is_disconnected(struct ccw_device *cdev)
        return (cdev->private->state == DEV_STATE_DISCONNECTED ||
                cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
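
/*
 * Path recovery: walk all ccw devices and poke disconnected ones so their
 * state machines retry path verification; re-arm the recovery timer with a
 * longer delay as long as some device is still disconnected.
 */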
static int recovery_check(struct device *dev, void *data)
        struct ccw_device *cdev = to_ccwdev(dev);

        spin_lock_irq(cdev->ccwlock);
        switch (cdev->private->state) {
        case DEV_STATE_DISCONNECTED:
                CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
                              cdev->private->dev_id.ssid,
                              cdev->private->dev_id.devno);
                dev_fsm_event(cdev, DEV_EVENT_VERIFY);
        case DEV_STATE_DISCONNECTED_SENSE_ID:
        spin_unlock_irq(cdev->ccwlock);

static void recovery_work_func(struct work_struct *unused)
        bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
        spin_lock_irq(&recovery_lock);
        if (!timer_pending(&recovery_timer)) {
                if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
                mod_timer(&recovery_timer, jiffies +
                          recovery_delay[recovery_phase] * HZ);
        spin_unlock_irq(&recovery_lock);
        CIO_MSG_EVENT(4, "recovery: end\n");

static DECLARE_WORK(recovery_work, recovery_work_func);

static void recovery_func(unsigned long data)
        /*
         * We can't do our recovery in softirq context and it's not
         * performance critical, so we schedule it.
         */
        schedule_work(&recovery_work);

static void ccw_device_schedule_recovery(void)
        unsigned long flags;

        CIO_MSG_EVENT(4, "recovery: schedule\n");
        spin_lock_irqsave(&recovery_lock, flags);
        if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
                mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
        spin_unlock_irqrestore(&recovery_lock, flags);

static void device_set_disconnected(struct ccw_device *cdev)
        ccw_device_set_timeout(cdev, 0);
        cdev->private->flags.fake_irb = 0;
        cdev->private->state = DEV_STATE_DISCONNECTED;
        ccw_device_schedule_recovery();
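
/*
 * Evaluate a subchannel after a machine check or channel report: decide
 * whether to keep, reprobe or unregister it, and notify the owning driver.
 */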
static int io_subchannel_sch_event(struct subchannel *sch, int slow)
        int event, ret, disc;
        unsigned long flags;
        enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
        struct ccw_device *cdev;

        spin_lock_irqsave(sch->lock, flags);
        cdev = sch_get_cdev(sch);
        disc = device_is_disconnected(cdev);
                /* Disconnected devices are evaluated directly only. */
                spin_unlock_irqrestore(sch->lock, flags);
        /* No interrupt after machine check - kill pending timers. */
        ccw_device_set_timeout(cdev, 0);
        if (!disc && !slow) {
                /* Non-disconnected devices are evaluated on the slow path. */
                spin_unlock_irqrestore(sch->lock, flags);
        event = io_subchannel_get_status(sch);
        CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
                      sch->schid.ssid, sch->schid.sch_no, event,
                      disc ? "disconnected" : "normal",
                      slow ? "slow" : "fast");
        /* Analyze subchannel status. */
                /* Check if paths have become available. */
                /* Prevent unwanted effects when opening lock. */
                cio_disable_subchannel(sch);
                device_set_disconnected(cdev);
                /* Ask driver what to do with device. */
                action = UNREGISTER;
                spin_unlock_irqrestore(sch->lock, flags);
                ret = io_subchannel_notify(sch, event);
                spin_lock_irqsave(sch->lock, flags);
        case CIO_REVALIDATE:
                /* Device will be removed, so no notify necessary. */
                /* Reprobe because immediate unregister might block. */
                action = UNREGISTER_PROBE;
                /* Get device operational again. */
        /* Perform action. */
        case UNREGISTER_PROBE:
                /* Unregister device (will use subchannel lock). */
                spin_unlock_irqrestore(sch->lock, flags);
                css_sch_device_unregister(sch);
                spin_lock_irqsave(sch->lock, flags);
                /* Reset intparm to zeroes. */
                sch->schib.pmcw.intparm = 0;
                ccw_device_trigger_reprobe(cdev);
        spin_unlock_irqrestore(sch->lock, flags);
        /* Probe if necessary. */
        if (action == UNREGISTER_PROBE)
                ret = css_probe_device(sch->schid);
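
/*
 * Support for the ccw console device: statically allocated and set up
 * before the regular device discovery machinery is available.
 */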
#ifdef CONFIG_CCW_CONSOLE
static struct ccw_device console_cdev;
static struct ccw_device_private console_private;
static int console_cdev_in_use;

static DEFINE_SPINLOCK(ccw_console_lock);

spinlock_t * cio_get_console_lock(void)
        return &ccw_console_lock;

static int ccw_device_console_enable(struct ccw_device *cdev,
                                     struct subchannel *sch)
        /* Attach subchannel private data. */
        sch->private = cio_get_console_priv();
        memset(sch->private, 0, sizeof(struct io_subchannel_private));
        io_subchannel_init_fields(sch);
        sch->driver = &io_subchannel_driver;
        /* Initialize the ccw_device structure. */
        cdev->dev.parent = &sch->dev;
        rc = io_subchannel_recog(cdev, sch);

        /* Now wait for the async. recognition to come to an end. */
        spin_lock_irq(cdev->ccwlock);
        while (!dev_fsm_final_state(cdev))
        if (cdev->private->state != DEV_STATE_OFFLINE)
        ccw_device_online(cdev);
        while (!dev_fsm_final_state(cdev))
        if (cdev->private->state != DEV_STATE_ONLINE)
        spin_unlock_irq(cdev->ccwlock);

ccw_device_probe_console(void)
        struct subchannel *sch;

        if (xchg(&console_cdev_in_use, 1) != 0)
                return ERR_PTR(-EBUSY);
        sch = cio_probe_console();
                console_cdev_in_use = 0;
                return (void *) sch;
        memset(&console_cdev, 0, sizeof(struct ccw_device));
        memset(&console_private, 0, sizeof(struct ccw_device_private));
        console_cdev.private = &console_private;
        console_private.cdev = &console_cdev;
        ret = ccw_device_console_enable(&console_cdev, sch);
                cio_release_console();
                console_cdev_in_use = 0;
                return ERR_PTR(ret);
        console_cdev.online = 1;
        return &console_cdev;
/*
 * get ccw_device matching the busid, but only if owned by cdrv
 */
__ccwdev_check_busid(struct device *dev, void *id)
        return (strncmp(bus_id, dev->bus_id, BUS_ID_SIZE) == 0);

/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *   If a match is found, the reference count of the found device is increased
 *   and it is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
        struct device_driver *drv;

        drv = get_driver(&cdrv->driver);

        dev = driver_find_device(drv, NULL, (void *)bus_id,
                                 __ccwdev_check_busid);

        return dev ? to_ccwdev(dev) : NULL;
/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * A ccw driver also contains the information that is needed for
ccw_device_probe (struct device *dev)
        struct ccw_device *cdev = to_ccwdev(dev);
        struct ccw_driver *cdrv = to_ccwdrv(dev->driver);

        cdev->drv = cdrv; /* to let the driver call _set_online */

        ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;

ccw_device_remove (struct device *dev)
        struct ccw_device *cdev = to_ccwdev(dev);
        struct ccw_driver *cdrv = cdev->drv;

        spin_lock_irq(cdev->ccwlock);
        ret = ccw_device_offline(cdev);
        spin_unlock_irq(cdev->ccwlock);
        wait_event(cdev->private->wait_q,
                   dev_fsm_final_state(cdev));
        CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
                      "device 0.%x.%04x\n",
                      ret, cdev->private->dev_id.ssid,
                      cdev->private->dev_id.devno);
        ccw_device_set_timeout(cdev, 0);

static void ccw_device_shutdown(struct device *dev)
        struct ccw_device *cdev;

        cdev = to_ccwdev(dev);
        if (cdev->drv && cdev->drv->shutdown)
                cdev->drv->shutdown(cdev);

struct bus_type ccw_bus_type = {
        .match = ccw_bus_match,
        .uevent = ccw_uevent,
        .probe = ccw_device_probe,
        .remove = ccw_device_remove,
        .shutdown = ccw_device_shutdown,
/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
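/*
 * Typical usage (illustrative sketch, not part of this file): a device
 * driver declares a struct ccw_driver with its id table and callbacks and
 * hands it to ccw_driver_register(), e.g.
 *
 *	static struct ccw_driver foo_driver = {
 *		.owner	     = THIS_MODULE,
 *		.name	     = "foo",
 *		.ids	     = foo_ids,
 *		.probe	     = foo_probe,
 *		.remove	     = foo_remove,
 *		.set_online  = foo_set_online,
 *		.set_offline = foo_set_offline,
 *	};
 *
 *	ret = ccw_driver_register(&foo_driver);
 */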
int ccw_driver_register(struct ccw_driver *cdriver)
        struct device_driver *drv = &cdriver->driver;

        drv->bus = &ccw_bus_type;
        drv->name = cdriver->name;
        drv->owner = cdriver->owner;

        return driver_register(drv);

/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
        driver_unregister(&cdriver->driver);

/* Helper func for qdio. */
struct subchannel_id
ccw_device_get_subchannel_id(struct ccw_device *cdev)
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);
EXPORT_SYMBOL(ccw_bus_type);
EXPORT_SYMBOL(ccw_device_work);
EXPORT_SYMBOL(ccw_device_notify_work);
EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);