[S390] cio: Export some symbols for modular css drivers.
1 /*
2  *  drivers/s390/cio/device.c
3  *  bus driver for ccw devices
4  *
5  *    Copyright IBM Corp. 2002,2008
6  *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
7  *               Cornelia Huck (cornelia.huck@de.ibm.com)
8  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
9  */
10 #include <linux/module.h>
11 #include <linux/init.h>
12 #include <linux/spinlock.h>
13 #include <linux/errno.h>
14 #include <linux/err.h>
15 #include <linux/slab.h>
16 #include <linux/list.h>
17 #include <linux/device.h>
18 #include <linux/workqueue.h>
19 #include <linux/timer.h>
20
21 #include <asm/ccwdev.h>
22 #include <asm/cio.h>
23 #include <asm/param.h>          /* HZ */
24 #include <asm/cmb.h>
25
26 #include "chp.h"
27 #include "cio.h"
28 #include "cio_debug.h"
29 #include "css.h"
30 #include "device.h"
31 #include "ioasm.h"
32 #include "io_sch.h"
33
34 static struct timer_list recovery_timer;
35 static DEFINE_SPINLOCK(recovery_lock);
36 static int recovery_phase;
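/* Seconds between successive recovery attempts for disconnected devices:
 * ccw_device_schedule_recovery() arms the timer with recovery_delay[0] and
 * recovery_work_func() backs off through the array, so retries run after
 * 3s, 30s and then every 300s for as long as disconnected devices remain. */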
37 static const unsigned long recovery_delay[] = { 3, 30, 300 };
38
39 /******************* bus type handling ***********************/
40
41 /* The Linux driver model distinguishes between a bus type and
42  * the bus itself. Of course we only have one channel
43  * subsystem driver and one channel subsystem per machine, but
44  * we still use the abstraction. T.R. says it's a good idea. */
45 static int
46 ccw_bus_match (struct device * dev, struct device_driver * drv)
47 {
48         struct ccw_device *cdev = to_ccwdev(dev);
49         struct ccw_driver *cdrv = to_ccwdrv(drv);
50         const struct ccw_device_id *ids = cdrv->ids, *found;
51
52         if (!ids)
53                 return 0;
54
55         found = ccw_device_id_match(ids, &cdev->id);
56         if (!found)
57                 return 0;
58
59         cdev->id.driver_info = found->driver_info;
60
61         return 1;
62 }
63
64 /* Store modalias string, followed by the given suffix, into buffer with
65  * specified size. Return length of resulting string (excluding trailing '\0')
66  * even if string doesn't fit buffer (snprintf semantics). */
67 static int snprint_alias(char *buf, size_t size,
68                          struct ccw_device_id *id, const char *suffix)
69 {
70         int len;
71
72         len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
73         if (len > size)
74                 return len;
75         buf += len;
76         size -= len;
77
78         if (id->dev_type != 0)
79                 len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
80                                 id->dev_model, suffix);
81         else
82                 len += snprintf(buf, size, "dtdm%s", suffix);
83
84         return len;
85 }
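/* For illustration (values hypothetical): a device with cu_type 0x3088,
 * cu_model 0x08, dev_type 0x3088 and dev_model 0x08 yields
 * "ccw:t3088m08dt3088dm08"; with dev_type 0 it yields "ccw:t3088m08dtdm". */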
86
87 /* Set up environment variables for ccw device uevent. Return 0 on success,
88  * non-zero otherwise. */
89 static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
90 {
91         struct ccw_device *cdev = to_ccwdev(dev);
92         struct ccw_device_id *id = &(cdev->id);
93         int ret;
94         char modalias_buf[30];
95
96         /* CU_TYPE= */
97         ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
98         if (ret)
99                 return ret;
100
101         /* CU_MODEL= */
102         ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
103         if (ret)
104                 return ret;
105
106         /* The next two can be zero, that's ok for us */
107         /* DEV_TYPE= */
108         ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
109         if (ret)
110                 return ret;
111
112         /* DEV_MODEL= */
113         ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
114         if (ret)
115                 return ret;
116
117         /* MODALIAS=  */
118         snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
119         ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
120         return ret;
121 }
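/* With the hypothetical 3088/08 values from above, the uevent environment
 * would contain CU_TYPE=3088, CU_MODEL=08, DEV_TYPE=3088, DEV_MODEL=08 and
 * MODALIAS=ccw:t3088m08dt3088dm08; udev and modprobe match MODALIAS against
 * the aliases generated from driver ccw_device_id tables. */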
122
123 struct bus_type ccw_bus_type;
124
125 static void io_subchannel_irq(struct subchannel *);
126 static int io_subchannel_probe(struct subchannel *);
127 static int io_subchannel_remove(struct subchannel *);
128 static void io_subchannel_shutdown(struct subchannel *);
129 static int io_subchannel_sch_event(struct subchannel *, int);
130 static int io_subchannel_chp_event(struct subchannel *, void *, int);
131
132 static struct css_driver io_subchannel_driver = {
133         .owner = THIS_MODULE,
134         .subchannel_type = SUBCHANNEL_TYPE_IO,
135         .name = "io_subchannel",
136         .irq = io_subchannel_irq,
137         .sch_event = io_subchannel_sch_event,
138         .chp_event = io_subchannel_chp_event,
139         .probe = io_subchannel_probe,
140         .remove = io_subchannel_remove,
141         .shutdown = io_subchannel_shutdown,
142 };
143
144 struct workqueue_struct *ccw_device_work;
145 struct workqueue_struct *ccw_device_notify_work;
146 wait_queue_head_t ccw_device_init_wq;
147 atomic_t ccw_device_init_count;
148
149 static void recovery_func(unsigned long data);
150
151 static int __init
152 init_ccw_bus_type (void)
153 {
154         int ret;
155
156         init_waitqueue_head(&ccw_device_init_wq);
157         atomic_set(&ccw_device_init_count, 0);
158         setup_timer(&recovery_timer, recovery_func, 0);
159
160         ccw_device_work = create_singlethread_workqueue("cio");
161         if (!ccw_device_work)
162                 return -ENOMEM; /* FIXME: better errno ? */
163         ccw_device_notify_work = create_singlethread_workqueue("cio_notify");
164         if (!ccw_device_notify_work) {
165                 ret = -ENOMEM; /* FIXME: better errno ? */
166                 goto out_err;
167         }
168         slow_path_wq = create_singlethread_workqueue("kslowcrw");
169         if (!slow_path_wq) {
170                 ret = -ENOMEM; /* FIXME: better errno ? */
171                 goto out_err;
172         }
173         if ((ret = bus_register (&ccw_bus_type)))
174                 goto out_err;
175
176         ret = css_driver_register(&io_subchannel_driver);
177         if (ret)
178                 goto out_err;
179
180         wait_event(ccw_device_init_wq,
181                    atomic_read(&ccw_device_init_count) == 0);
182         flush_workqueue(ccw_device_work);
183         return 0;
184 out_err:
185         if (ccw_device_work)
186                 destroy_workqueue(ccw_device_work);
187         if (ccw_device_notify_work)
188                 destroy_workqueue(ccw_device_notify_work);
189         if (slow_path_wq)
190                 destroy_workqueue(slow_path_wq);
191         return ret;
192 }
193
194 static void __exit
195 cleanup_ccw_bus_type (void)
196 {
197         css_driver_unregister(&io_subchannel_driver);
198         bus_unregister(&ccw_bus_type);
199         destroy_workqueue(ccw_device_notify_work);
200         destroy_workqueue(ccw_device_work);
201 }
202
203 subsys_initcall(init_ccw_bus_type);
204 module_exit(cleanup_ccw_bus_type);
205
206 /************************ device handling **************************/
207
208 /*
209  * A ccw_device has some interfaces in sysfs in addition to the
210  * standard ones.
211  * The following entries are designed to export the information which
212  * resided in 2.4 in /proc/subchannels. Subchannel and device number
213  * are obvious, so they don't have an entry :)
214  * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
215  */
216 static ssize_t
217 chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
218 {
219         struct subchannel *sch = to_subchannel(dev);
220         struct chsc_ssd_info *ssd = &sch->ssd_info;
221         ssize_t ret = 0;
222         int chp;
223         int mask;
224
225         for (chp = 0; chp < 8; chp++) {
226                 mask = 0x80 >> chp;
227                 if (ssd->path_mask & mask)
228                         ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
229                 else
230                         ret += sprintf(buf + ret, "00 ");
231         }
232         ret += sprintf (buf+ret, "\n");
233         return min((ssize_t)PAGE_SIZE, ret);
234 }
235
236 static ssize_t
237 pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf)
238 {
239         struct subchannel *sch = to_subchannel(dev);
240         struct pmcw *pmcw = &sch->schib.pmcw;
241
242         return sprintf (buf, "%02x %02x %02x\n",
243                         pmcw->pim, pmcw->pam, pmcw->pom);
244 }
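/* Example sysfs output (values hypothetical): for a subchannel with two
 * channel paths 0x10 and 0x11, "chpids" reads "10 11 00 00 00 00 00 00 "
 * and "pimpampom" reads e.g. "c0 c0 80", i.e. PIM, PAM and POM as two-digit
 * hex bytes. */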
245
246 static ssize_t
247 devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
248 {
249         struct ccw_device *cdev = to_ccwdev(dev);
250         struct ccw_device_id *id = &(cdev->id);
251
252         if (id->dev_type != 0)
253                 return sprintf(buf, "%04x/%02x\n",
254                                 id->dev_type, id->dev_model);
255         else
256                 return sprintf(buf, "n/a\n");
257 }
258
259 static ssize_t
260 cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
261 {
262         struct ccw_device *cdev = to_ccwdev(dev);
263         struct ccw_device_id *id = &(cdev->id);
264
265         return sprintf(buf, "%04x/%02x\n",
266                        id->cu_type, id->cu_model);
267 }
268
269 static ssize_t
270 modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
271 {
272         struct ccw_device *cdev = to_ccwdev(dev);
273         struct ccw_device_id *id = &(cdev->id);
274         int len;
275
276         len = snprint_alias(buf, PAGE_SIZE, id, "\n");
277
278         return len > PAGE_SIZE ? PAGE_SIZE : len;
279 }
280
281 static ssize_t
282 online_show (struct device *dev, struct device_attribute *attr, char *buf)
283 {
284         struct ccw_device *cdev = to_ccwdev(dev);
285
286         return sprintf(buf, cdev->online ? "1\n" : "0\n");
287 }
288
289 int ccw_device_is_orphan(struct ccw_device *cdev)
290 {
291         return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
292 }
293
294 static void ccw_device_unregister(struct ccw_device *cdev)
295 {
296         if (test_and_clear_bit(1, &cdev->private->registered))
297                 device_del(&cdev->dev);
298 }
299
300 static void ccw_device_remove_orphan_cb(struct device *dev)
301 {
302         struct ccw_device *cdev = to_ccwdev(dev);
303
304         ccw_device_unregister(cdev);
305         put_device(&cdev->dev);
306 }
307
308 static void ccw_device_remove_sch_cb(struct device *dev)
309 {
310         struct subchannel *sch;
311
312         sch = to_subchannel(dev);
313         css_sch_device_unregister(sch);
314         /* Reset intparm to zeroes. */
315         sch->schib.pmcw.intparm = 0;
316         cio_modify(sch);
317         put_device(&sch->dev);
318 }
319
320 static void
321 ccw_device_remove_disconnected(struct ccw_device *cdev)
322 {
323         unsigned long flags;
324         int rc;
325
326         /*
327          * Forced offline in disconnected state means
328          * 'throw away device'.
329          */
330         if (ccw_device_is_orphan(cdev)) {
331                 /*
332                  * Deregister ccw device.
333                  * Unfortunately, we cannot do this directly from the
334                  * attribute method.
335                  */
336                 spin_lock_irqsave(cdev->ccwlock, flags);
337                 cdev->private->state = DEV_STATE_NOT_OPER;
338                 spin_unlock_irqrestore(cdev->ccwlock, flags);
339                 rc = device_schedule_callback(&cdev->dev,
340                                               ccw_device_remove_orphan_cb);
341                 if (rc)
342                         CIO_MSG_EVENT(0, "Couldn't unregister orphan "
343                                       "0.%x.%04x\n",
344                                       cdev->private->dev_id.ssid,
345                                       cdev->private->dev_id.devno);
346                 return;
347         }
348         /* Deregister subchannel, which will kill the ccw device. */
349         rc = device_schedule_callback(cdev->dev.parent,
350                                       ccw_device_remove_sch_cb);
351         if (rc)
352                 CIO_MSG_EVENT(0, "Couldn't unregister disconnected device "
353                               "0.%x.%04x\n",
354                               cdev->private->dev_id.ssid,
355                               cdev->private->dev_id.devno);
356 }
357
358 /**
359  * ccw_device_set_offline() - disable a ccw device for I/O
360  * @cdev: target ccw device
361  *
362  * This function calls the driver's set_offline() function for @cdev, if
363  * given, and then disables @cdev.
364  * Returns:
365  *   %0 on success and a negative error value on failure.
366  * Context:
367  *  enabled, ccw device lock not held
368  */
369 int ccw_device_set_offline(struct ccw_device *cdev)
370 {
371         int ret;
372
373         if (!cdev)
374                 return -ENODEV;
375         if (!cdev->online || !cdev->drv)
376                 return -EINVAL;
377
378         if (cdev->drv->set_offline) {
379                 ret = cdev->drv->set_offline(cdev);
380                 if (ret != 0)
381                         return ret;
382         }
383         cdev->online = 0;
384         spin_lock_irq(cdev->ccwlock);
385         ret = ccw_device_offline(cdev);
386         if (ret == -ENODEV) {
387                 if (cdev->private->state != DEV_STATE_NOT_OPER) {
388                         cdev->private->state = DEV_STATE_OFFLINE;
389                         dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
390                 }
391                 spin_unlock_irq(cdev->ccwlock);
392                 return ret;
393         }
394         spin_unlock_irq(cdev->ccwlock);
395         if (ret == 0)
396                 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
397         else {
398                 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
399                               "device 0.%x.%04x\n",
400                               ret, cdev->private->dev_id.ssid,
401                               cdev->private->dev_id.devno);
402                 cdev->online = 1;
403         }
404         return ret;
405 }
406
407 /**
408  * ccw_device_set_online() - enable a ccw device for I/O
409  * @cdev: target ccw device
410  *
411  * This function first enables @cdev and then calls the driver's set_online()
412  * function for @cdev, if given. If set_online() returns an error, @cdev is
413  * disabled again.
414  * Returns:
415  *   %0 on success and a negative error value on failure.
416  * Context:
417  *  enabled, ccw device lock not held
418  */
419 int ccw_device_set_online(struct ccw_device *cdev)
420 {
421         int ret;
422
423         if (!cdev)
424                 return -ENODEV;
425         if (cdev->online || !cdev->drv)
426                 return -EINVAL;
427
428         spin_lock_irq(cdev->ccwlock);
429         ret = ccw_device_online(cdev);
430         spin_unlock_irq(cdev->ccwlock);
431         if (ret == 0)
432                 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
433         else {
434                 CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
435                               "device 0.%x.%04x\n",
436                               ret, cdev->private->dev_id.ssid,
437                               cdev->private->dev_id.devno);
438                 return ret;
439         }
440         if (cdev->private->state != DEV_STATE_ONLINE)
441                 return -ENODEV;
442         if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) {
443                 cdev->online = 1;
444                 return 0;
445         }
446         spin_lock_irq(cdev->ccwlock);
447         ret = ccw_device_offline(cdev);
448         spin_unlock_irq(cdev->ccwlock);
449         if (ret == 0)
450                 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
451         else
452                 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
453                               "device 0.%x.%04x\n",
454                               ret, cdev->private->dev_id.ssid,
455                               cdev->private->dev_id.devno);
456         return (ret == 0) ? -ENODEV : ret;
457 }
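/* A minimal sketch (driver name, IDs and callback names are hypothetical) of
 * how a ccw device driver hooks into the set_online/set_offline calls above:
 *
 *      static struct ccw_device_id foo_ids[] = {
 *              { CCW_DEVICE(0x3088, 0x08) },
 *              { },
 *      };
 *
 *      static struct ccw_driver foo_driver = {
 *              .owner       = THIS_MODULE,
 *              .name        = "foo",
 *              .ids         = foo_ids,
 *              .set_online  = foo_set_online,
 *              .set_offline = foo_set_offline,
 *      };
 *
 * After ccw_driver_register(&foo_driver), writes to a matching device's
 * "online" attribute end up in ccw_device_set_online()/ccw_device_set_offline(),
 * which enable or disable the device and invoke the driver's callbacks. */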
458
459 static void online_store_handle_offline(struct ccw_device *cdev)
460 {
461         if (cdev->private->state == DEV_STATE_DISCONNECTED)
462                 ccw_device_remove_disconnected(cdev);
463         else if (cdev->drv && cdev->drv->set_offline)
464                 ccw_device_set_offline(cdev);
465 }
466
467 static int online_store_recog_and_online(struct ccw_device *cdev)
468 {
469         int ret;
470
471         /* Do device recognition, if needed. */
472         if (cdev->id.cu_type == 0) {
473                 ret = ccw_device_recognition(cdev);
474                 if (ret) {
475                         CIO_MSG_EVENT(0, "Couldn't start recognition "
476                                       "for device 0.%x.%04x (ret=%d)\n",
477                                       cdev->private->dev_id.ssid,
478                                       cdev->private->dev_id.devno, ret);
479                         return ret;
480                 }
481                 wait_event(cdev->private->wait_q,
482                            cdev->private->flags.recog_done);
483         }
484         if (cdev->drv && cdev->drv->set_online)
485                 ccw_device_set_online(cdev);
486         return 0;
487 }
488 static void online_store_handle_online(struct ccw_device *cdev, int force)
489 {
490         int ret;
491
492         ret = online_store_recog_and_online(cdev);
493         if (ret)
494                 return;
495         if (force && cdev->private->state == DEV_STATE_BOXED) {
496                 ret = ccw_device_stlck(cdev);
497                 if (ret) {
498                         dev_warn(&cdev->dev,
499                                  "ccw_device_stlck returned %d!\n", ret);
500                         return;
501                 }
502                 if (cdev->id.cu_type == 0)
503                         cdev->private->state = DEV_STATE_NOT_OPER;
504                 online_store_recog_and_online(cdev);
505         }
506
507 }
508
509 static ssize_t online_store (struct device *dev, struct device_attribute *attr,
510                              const char *buf, size_t count)
511 {
512         struct ccw_device *cdev = to_ccwdev(dev);
513         int force, ret;
514         unsigned long i;
515
516         if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
517                 return -EAGAIN;
518
519         if (cdev->drv && !try_module_get(cdev->drv->owner)) {
520                 atomic_set(&cdev->private->onoff, 0);
521                 return -EINVAL;
522         }
523         if (!strncmp(buf, "force\n", count)) {
524                 force = 1;
525                 i = 1;
526                 ret = 0;
527         } else {
528                 force = 0;
529                 ret = strict_strtoul(buf, 16, &i);
530         }
531         if (ret)
532                 goto out;
533         switch (i) {
534         case 0:
535                 online_store_handle_offline(cdev);
536                 ret = count;
537                 break;
538         case 1:
539                 online_store_handle_online(cdev, force);
540                 ret = count;
541                 break;
542         default:
543                 ret = -EINVAL;
544         }
545 out:
546         if (cdev->drv)
547                 module_put(cdev->drv->owner);
548         atomic_set(&cdev->private->onoff, 0);
549         return ret;
550 }
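/* Usage from user space (bus ID hypothetical): writing 0 or 1 to
 * /sys/bus/ccw/devices/0.0.1234/online sets the device offline or online;
 * writing "force" additionally breaks the reserve of a boxed device via
 * ccw_device_stlck() before retrying recognition and online processing. */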
551
552 static ssize_t
553 available_show (struct device *dev, struct device_attribute *attr, char *buf)
554 {
555         struct ccw_device *cdev = to_ccwdev(dev);
556         struct subchannel *sch;
557
558         if (ccw_device_is_orphan(cdev))
559                 return sprintf(buf, "no device\n");
560         switch (cdev->private->state) {
561         case DEV_STATE_BOXED:
562                 return sprintf(buf, "boxed\n");
563         case DEV_STATE_DISCONNECTED:
564         case DEV_STATE_DISCONNECTED_SENSE_ID:
565         case DEV_STATE_NOT_OPER:
566                 sch = to_subchannel(dev->parent);
567                 if (!sch->lpm)
568                         return sprintf(buf, "no path\n");
569                 else
570                         return sprintf(buf, "no device\n");
571         default:
572                 /* All other states considered fine. */
573                 return sprintf(buf, "good\n");
574         }
575 }
576
577 static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
578 static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
579 static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
580 static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
581 static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
582 static DEVICE_ATTR(online, 0644, online_show, online_store);
583 static DEVICE_ATTR(availability, 0444, available_show, NULL);
584
585 static struct attribute *io_subchannel_attrs[] = {
586         &dev_attr_chpids.attr,
587         &dev_attr_pimpampom.attr,
588         NULL,
589 };
590
591 static struct attribute_group io_subchannel_attr_group = {
592         .attrs = io_subchannel_attrs,
593 };
594
595 static struct attribute * ccwdev_attrs[] = {
596         &dev_attr_devtype.attr,
597         &dev_attr_cutype.attr,
598         &dev_attr_modalias.attr,
599         &dev_attr_online.attr,
600         &dev_attr_cmb_enable.attr,
601         &dev_attr_availability.attr,
602         NULL,
603 };
604
605 static struct attribute_group ccwdev_attr_group = {
606         .attrs = ccwdev_attrs,
607 };
608
609 static struct attribute_group *ccwdev_attr_groups[] = {
610         &ccwdev_attr_group,
611         NULL,
612 };
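/* io_subchannel_attr_group (chpids, pimpampom) is created on the subchannel's
 * sysfs directory in io_subchannel_probe(), while ccwdev_attr_groups is hooked
 * up via dev.groups, so devtype, cutype, modalias, online, cmb_enable and
 * availability appear below each ccw device node. */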
613
614 /* This is a simple abstraction for device_register() that sets the
615  * correct bus type and adds the bus-specific files. */
616 static int ccw_device_register(struct ccw_device *cdev)
617 {
618         struct device *dev = &cdev->dev;
619         int ret;
620
621         dev->bus = &ccw_bus_type;
622
623         if ((ret = device_add(dev)))
624                 return ret;
625
626         set_bit(1, &cdev->private->registered);
627         return ret;
628 }
629
630 struct match_data {
631         struct ccw_dev_id dev_id;
632         struct ccw_device * sibling;
633 };
634
635 static int
636 match_devno(struct device * dev, void * data)
637 {
638         struct match_data * d = data;
639         struct ccw_device * cdev;
640
641         cdev = to_ccwdev(dev);
642         if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
643             !ccw_device_is_orphan(cdev) &&
644             ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
645             (cdev != d->sibling))
646                 return 1;
647         return 0;
648 }
649
650 static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
651                                                      struct ccw_device *sibling)
652 {
653         struct device *dev;
654         struct match_data data;
655
656         data.dev_id = *dev_id;
657         data.sibling = sibling;
658         dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);
659
660         return dev ? to_ccwdev(dev) : NULL;
661 }
662
663 static int match_orphan(struct device *dev, void *data)
664 {
665         struct ccw_dev_id *dev_id;
666         struct ccw_device *cdev;
667
668         dev_id = data;
669         cdev = to_ccwdev(dev);
670         return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
671 }
672
673 static struct ccw_device *
674 get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
675                               struct ccw_dev_id *dev_id)
676 {
677         struct device *dev;
678
679         dev = device_find_child(&css->pseudo_subchannel->dev, dev_id,
680                                 match_orphan);
681
682         return dev ? to_ccwdev(dev) : NULL;
683 }
684
685 static void
686 ccw_device_add_changed(struct work_struct *work)
687 {
688         struct ccw_device_private *priv;
689         struct ccw_device *cdev;
690
691         priv = container_of(work, struct ccw_device_private, kick_work);
692         cdev = priv->cdev;
693         if (device_add(&cdev->dev)) {
694                 put_device(&cdev->dev);
695                 return;
696         }
697         set_bit(1, &cdev->private->registered);
698 }
699
700 void ccw_device_do_unreg_rereg(struct work_struct *work)
701 {
702         struct ccw_device_private *priv;
703         struct ccw_device *cdev;
704         struct subchannel *sch;
705
706         priv = container_of(work, struct ccw_device_private, kick_work);
707         cdev = priv->cdev;
708         sch = to_subchannel(cdev->dev.parent);
709
710         ccw_device_unregister(cdev);
711         PREPARE_WORK(&cdev->private->kick_work,
712                      ccw_device_add_changed);
713         queue_work(ccw_device_work, &cdev->private->kick_work);
714 }
715
716 static void
717 ccw_device_release(struct device *dev)
718 {
719         struct ccw_device *cdev;
720
721         cdev = to_ccwdev(dev);
722         kfree(cdev->private);
723         kfree(cdev);
724 }
725
726 static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
727 {
728         struct ccw_device *cdev;
729
730         cdev  = kzalloc(sizeof(*cdev), GFP_KERNEL);
731         if (cdev) {
732                 cdev->private = kzalloc(sizeof(struct ccw_device_private),
733                                         GFP_KERNEL | GFP_DMA);
734                 if (cdev->private)
735                         return cdev;
736         }
737         kfree(cdev);
738         return ERR_PTR(-ENOMEM);
739 }
740
741 static int io_subchannel_initialize_dev(struct subchannel *sch,
742                                         struct ccw_device *cdev)
743 {
744         cdev->private->cdev = cdev;
745         atomic_set(&cdev->private->onoff, 0);
746         cdev->dev.parent = &sch->dev;
747         cdev->dev.release = ccw_device_release;
748         INIT_WORK(&cdev->private->kick_work, NULL);
749         cdev->dev.groups = ccwdev_attr_groups;
750         /* Do first half of device_register. */
751         device_initialize(&cdev->dev);
752         if (!get_device(&sch->dev)) {
753                 if (cdev->dev.release)
754                         cdev->dev.release(&cdev->dev);
755                 return -ENODEV;
756         }
757         return 0;
758 }
759
760 static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
761 {
762         struct ccw_device *cdev;
763         int ret;
764
765         cdev = io_subchannel_allocate_dev(sch);
766         if (!IS_ERR(cdev)) {
767                 ret = io_subchannel_initialize_dev(sch, cdev);
768                 if (ret) {
769                         kfree(cdev);
770                         cdev = ERR_PTR(ret);
771                 }
772         }
773         return cdev;
774 }
775
776 static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
777
778 static void sch_attach_device(struct subchannel *sch,
779                               struct ccw_device *cdev)
780 {
781         css_update_ssd_info(sch);
782         spin_lock_irq(sch->lock);
783         sch_set_cdev(sch, cdev);
784         cdev->private->schid = sch->schid;
785         cdev->ccwlock = sch->lock;
786         ccw_device_trigger_reprobe(cdev);
787         spin_unlock_irq(sch->lock);
788 }
789
790 static void sch_attach_disconnected_device(struct subchannel *sch,
791                                            struct ccw_device *cdev)
792 {
793         struct subchannel *other_sch;
794         int ret;
795
796         other_sch = to_subchannel(get_device(cdev->dev.parent));
797         ret = device_move(&cdev->dev, &sch->dev);
798         if (ret) {
799                 CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
800                               "(ret=%d)!\n", cdev->private->dev_id.ssid,
801                               cdev->private->dev_id.devno, ret);
802                 put_device(&other_sch->dev);
803                 return;
804         }
805         sch_set_cdev(other_sch, NULL);
806         /* No need to keep a subchannel without ccw device around. */
807         css_sch_device_unregister(other_sch);
808         put_device(&other_sch->dev);
809         sch_attach_device(sch, cdev);
810 }
811
812 static void sch_attach_orphaned_device(struct subchannel *sch,
813                                        struct ccw_device *cdev)
814 {
815         int ret;
816
817         /* Try to move the ccw device to its new subchannel. */
818         ret = device_move(&cdev->dev, &sch->dev);
819         if (ret) {
820                 CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
821                               "failed (ret=%d)!\n",
822                               cdev->private->dev_id.ssid,
823                               cdev->private->dev_id.devno, ret);
824                 return;
825         }
826         sch_attach_device(sch, cdev);
827 }
828
829 static void sch_create_and_recog_new_device(struct subchannel *sch)
830 {
831         struct ccw_device *cdev;
832
833         /* Need to allocate a new ccw device. */
834         cdev = io_subchannel_create_ccwdev(sch);
835         if (IS_ERR(cdev)) {
836                 /* OK, we did everything we could... */
837                 css_sch_device_unregister(sch);
838                 return;
839         }
840         spin_lock_irq(sch->lock);
841         sch_set_cdev(sch, cdev);
842         spin_unlock_irq(sch->lock);
843         /* Start recognition for the new ccw device. */
844         if (io_subchannel_recog(cdev, sch)) {
845                 spin_lock_irq(sch->lock);
846                 sch_set_cdev(sch, NULL);
847                 spin_unlock_irq(sch->lock);
848                 if (cdev->dev.release)
849                         cdev->dev.release(&cdev->dev);
850                 css_sch_device_unregister(sch);
851         }
852 }
853
854
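/*
 * Worker: move the ccw device currently attached to a subchannel into the
 * "orphanage" (the pseudo subchannel of its channel subsystem) and attach a
 * replacement in its place, preferring a matching disconnected device, then a
 * matching orphan; if neither exists, create a new ccw device and start
 * recognition on it.
 */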
855 void ccw_device_move_to_orphanage(struct work_struct *work)
856 {
857         struct ccw_device_private *priv;
858         struct ccw_device *cdev;
859         struct ccw_device *replacing_cdev;
860         struct subchannel *sch;
861         int ret;
862         struct channel_subsystem *css;
863         struct ccw_dev_id dev_id;
864
865         priv = container_of(work, struct ccw_device_private, kick_work);
866         cdev = priv->cdev;
867         sch = to_subchannel(cdev->dev.parent);
868         css = to_css(sch->dev.parent);
869         dev_id.devno = sch->schib.pmcw.dev;
870         dev_id.ssid = sch->schid.ssid;
871
872         /*
873          * Move the orphaned ccw device to the orphanage so the replacing
874          * ccw device can take its place on the subchannel.
875          */
876         ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev);
877         if (ret) {
878                 CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
879                               "(ret=%d)!\n", cdev->private->dev_id.ssid,
880                               cdev->private->dev_id.devno, ret);
881                 return;
882         }
883         cdev->ccwlock = css->pseudo_subchannel->lock;
884         /*
885          * Search for the replacing ccw device
886          * - among the disconnected devices
887          * - in the orphanage
888          */
889         replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
890         if (replacing_cdev) {
891                 sch_attach_disconnected_device(sch, replacing_cdev);
892                 return;
893         }
894         replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
895         if (replacing_cdev) {
896                 sch_attach_orphaned_device(sch, replacing_cdev);
897                 return;
898         }
899         sch_create_and_recog_new_device(sch);
900 }
901
902 /*
903  * Register recognized device.
904  */
905 static void
906 io_subchannel_register(struct work_struct *work)
907 {
908         struct ccw_device_private *priv;
909         struct ccw_device *cdev;
910         struct subchannel *sch;
911         int ret;
912         unsigned long flags;
913
914         priv = container_of(work, struct ccw_device_private, kick_work);
915         cdev = priv->cdev;
916         sch = to_subchannel(cdev->dev.parent);
917         css_update_ssd_info(sch);
918         /*
919          * io_subchannel_register() will also be called after device
920          * recognition has been done for a boxed device (which will already
921          * be registered). We need to reprobe since we may now have sense id
922          * information.
923          */
924         if (klist_node_attached(&cdev->dev.knode_parent)) {
925                 if (!cdev->drv) {
926                         ret = device_reprobe(&cdev->dev);
927                         if (ret)
928                                 /* We can't do much here. */
929                                 CIO_MSG_EVENT(0, "device_reprobe() returned"
930                                               " %d for 0.%x.%04x\n", ret,
931                                               cdev->private->dev_id.ssid,
932                                               cdev->private->dev_id.devno);
933                 }
934                 goto out;
935         }
936         /*
937          * Now that we know this subchannel will stay, we can throw
938          * our delayed uevent.
939          */
940         sch->dev.uevent_suppress = 0;
941         kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
942         /* make it known to the system */
943         ret = ccw_device_register(cdev);
944         if (ret) {
945                 CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
946                               cdev->private->dev_id.ssid,
947                               cdev->private->dev_id.devno, ret);
948                 put_device(&cdev->dev);
949                 spin_lock_irqsave(sch->lock, flags);
950                 sch_set_cdev(sch, NULL);
951                 spin_unlock_irqrestore(sch->lock, flags);
952                 kfree (cdev->private);
953                 kfree (cdev);
954                 put_device(&sch->dev);
955                 if (atomic_dec_and_test(&ccw_device_init_count))
956                         wake_up(&ccw_device_init_wq);
957                 return;
958         }
959         put_device(&cdev->dev);
960 out:
961         cdev->private->flags.recog_done = 1;
962         put_device(&sch->dev);
963         wake_up(&cdev->private->wait_q);
964         if (atomic_dec_and_test(&ccw_device_init_count))
965                 wake_up(&ccw_device_init_wq);
966 }
967
968 static void ccw_device_call_sch_unregister(struct work_struct *work)
969 {
970         struct ccw_device_private *priv;
971         struct ccw_device *cdev;
972         struct subchannel *sch;
973
974         priv = container_of(work, struct ccw_device_private, kick_work);
975         cdev = priv->cdev;
976         sch = to_subchannel(cdev->dev.parent);
977         css_sch_device_unregister(sch);
978         /* Reset intparm to zeroes. */
979         sch->schib.pmcw.intparm = 0;
980         cio_modify(sch);
981         put_device(&cdev->dev);
982         put_device(&sch->dev);
983 }
984
985 /*
986  * subchannel recognition done. Called from the state machine.
987  */
988 void
989 io_subchannel_recog_done(struct ccw_device *cdev)
990 {
991         struct subchannel *sch;
992
993         if (css_init_done == 0) {
994                 cdev->private->flags.recog_done = 1;
995                 return;
996         }
997         switch (cdev->private->state) {
998         case DEV_STATE_NOT_OPER:
999                 cdev->private->flags.recog_done = 1;
1000                 /* Remove device found not operational. */
1001                 if (!get_device(&cdev->dev))
1002                         break;
1003                 sch = to_subchannel(cdev->dev.parent);
1004                 PREPARE_WORK(&cdev->private->kick_work,
1005                              ccw_device_call_sch_unregister);
1006                 queue_work(slow_path_wq, &cdev->private->kick_work);
1007                 if (atomic_dec_and_test(&ccw_device_init_count))
1008                         wake_up(&ccw_device_init_wq);
1009                 break;
1010         case DEV_STATE_BOXED:
1011                 /* Device did not respond in time. */
1012         case DEV_STATE_OFFLINE:
1013                 /* 
1014                  * We can't register the device in interrupt context so
1015                  * we schedule a work item.
1016                  */
1017                 if (!get_device(&cdev->dev))
1018                         break;
1019                 PREPARE_WORK(&cdev->private->kick_work,
1020                              io_subchannel_register);
1021                 queue_work(slow_path_wq, &cdev->private->kick_work);
1022                 break;
1023         }
1024 }
1025
1026 static int
1027 io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1028 {
1029         int rc;
1030         struct ccw_device_private *priv;
1031
1032         sch_set_cdev(sch, cdev);
1033         cdev->ccwlock = sch->lock;
1034
1035         /* Init private data. */
1036         priv = cdev->private;
1037         priv->dev_id.devno = sch->schib.pmcw.dev;
1038         priv->dev_id.ssid = sch->schid.ssid;
1039         priv->schid = sch->schid;
1040         priv->state = DEV_STATE_NOT_OPER;
1041         INIT_LIST_HEAD(&priv->cmb_list);
1042         init_waitqueue_head(&priv->wait_q);
1043         init_timer(&priv->timer);
1044
1045         /* Set an initial name for the device. */
1046         snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
1047                   sch->schid.ssid, sch->schib.pmcw.dev);
1048
1049         /* Increase counter of devices currently in recognition. */
1050         atomic_inc(&ccw_device_init_count);
1051
1052         /* Start async. device sensing. */
1053         spin_lock_irq(sch->lock);
1054         rc = ccw_device_recognition(cdev);
1055         spin_unlock_irq(sch->lock);
1056         if (rc) {
1057                 if (atomic_dec_and_test(&ccw_device_init_count))
1058                         wake_up(&ccw_device_init_wq);
1059         }
1060         return rc;
1061 }
1062
1063 static void ccw_device_move_to_sch(struct work_struct *work)
1064 {
1065         struct ccw_device_private *priv;
1066         int rc;
1067         struct subchannel *sch;
1068         struct ccw_device *cdev;
1069         struct subchannel *former_parent;
1070
1071         priv = container_of(work, struct ccw_device_private, kick_work);
1072         sch = priv->sch;
1073         cdev = priv->cdev;
1074         former_parent = ccw_device_is_orphan(cdev) ?
1075                 NULL : to_subchannel(get_device(cdev->dev.parent));
1076         mutex_lock(&sch->reg_mutex);
1077         /* Try to move the ccw device to its new subchannel. */
1078         rc = device_move(&cdev->dev, &sch->dev);
1079         mutex_unlock(&sch->reg_mutex);
1080         if (rc) {
1081                 CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel "
1082                               "0.%x.%04x failed (ret=%d)!\n",
1083                               cdev->private->dev_id.ssid,
1084                               cdev->private->dev_id.devno, sch->schid.ssid,
1085                               sch->schid.sch_no, rc);
1086                 css_sch_device_unregister(sch);
1087                 goto out;
1088         }
1089         if (former_parent) {
1090                 spin_lock_irq(former_parent->lock);
1091                 sch_set_cdev(former_parent, NULL);
1092                 spin_unlock_irq(former_parent->lock);
1093                 css_sch_device_unregister(former_parent);
1094                 /* Reset intparm to zeroes. */
1095                 former_parent->schib.pmcw.intparm = 0;
1096                 cio_modify(former_parent);
1097         }
1098         sch_attach_device(sch, cdev);
1099 out:
1100         if (former_parent)
1101                 put_device(&former_parent->dev);
1102         put_device(&cdev->dev);
1103 }
1104
1105 static void io_subchannel_irq(struct subchannel *sch)
1106 {
1107         struct ccw_device *cdev;
1108
1109         cdev = sch_get_cdev(sch);
1110
1111         CIO_TRACE_EVENT(3, "IRQ");
1112         CIO_TRACE_EVENT(3, sch->dev.bus_id);
1113         if (cdev)
1114                 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1115 }
1116
1117 static void io_subchannel_init_fields(struct subchannel *sch)
1118 {
1119         if (cio_is_console(sch->schid))
1120                 sch->opm = 0xff;
1121         else
1122                 sch->opm = chp_get_sch_opm(sch);
1123         sch->lpm = sch->schib.pmcw.pam & sch->opm;
1124         sch->isc = cio_is_console(sch->schid) ? 1 : 3;
1125
1126         CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
1127                       " - PIM = %02X, PAM = %02X, POM = %02X\n",
1128                       sch->schib.pmcw.dev, sch->schid.ssid,
1129                       sch->schid.sch_no, sch->schib.pmcw.pim,
1130                       sch->schib.pmcw.pam, sch->schib.pmcw.pom);
1131         /* Initially set up some fields in the pmcw. */
1132         sch->schib.pmcw.ena = 0;
1133         sch->schib.pmcw.csense = 1;     /* concurrent sense */
1134         if ((sch->lpm & (sch->lpm - 1)) != 0)
1135                 sch->schib.pmcw.mp = 1; /* multipath mode */
1136         /* clean up possible residual cmf stuff */
1137         sch->schib.pmcw.mme = 0;
1138         sch->schib.pmcw.mbfc = 0;
1139         sch->schib.pmcw.mbi = 0;
1140         sch->schib.mba = 0;
1141 }
1142
1143 static int io_subchannel_probe(struct subchannel *sch)
1144 {
1145         struct ccw_device *cdev;
1146         int rc;
1147         unsigned long flags;
1148         struct ccw_dev_id dev_id;
1149
1150         cdev = sch_get_cdev(sch);
1151         if (cdev) {
1152                 rc = sysfs_create_group(&sch->dev.kobj,
1153                                         &io_subchannel_attr_group);
1154                 if (rc)
1155                         CIO_MSG_EVENT(0, "Failed to create io subchannel "
1156                                       "attributes for subchannel "
1157                                       "0.%x.%04x (rc=%d)\n",
1158                                       sch->schid.ssid, sch->schid.sch_no, rc);
1159                 /*
1160                  * This subchannel already has an associated ccw_device.
1161                  * Throw the delayed uevent for the subchannel, register
1162                  * the ccw_device and exit. This happens for all early
1163                  * devices, e.g. the console.
1164                  */
1165                 sch->dev.uevent_suppress = 0;
1166                 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
1167                 cdev->dev.groups = ccwdev_attr_groups;
1168                 device_initialize(&cdev->dev);
1169                 ccw_device_register(cdev);
1170                 /*
1171                  * Check if the device is already online. If it is
1172                  * the reference count needs to be corrected
1173                  * (see ccw_device_online and css_init_done for the
1174                  * ugly details).
1175                  */
1176                 if (cdev->private->state != DEV_STATE_NOT_OPER &&
1177                     cdev->private->state != DEV_STATE_OFFLINE &&
1178                     cdev->private->state != DEV_STATE_BOXED)
1179                         get_device(&cdev->dev);
1180                 return 0;
1181         }
1182         io_subchannel_init_fields(sch);
1183         /*
1184          * First check if a fitting device may be found amongst the
1185          * disconnected devices or in the orphanage.
1186          */
1187         dev_id.devno = sch->schib.pmcw.dev;
1188         dev_id.ssid = sch->schid.ssid;
1189         rc = sysfs_create_group(&sch->dev.kobj,
1190                                 &io_subchannel_attr_group);
1191         if (rc)
1192                 return rc;
1193         /* Allocate I/O subchannel private data. */
1194         sch->private = kzalloc(sizeof(struct io_subchannel_private),
1195                                GFP_KERNEL | GFP_DMA);
1196         if (!sch->private) {
1197                 rc = -ENOMEM;
1198                 goto out_err;
1199         }
1200         cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
1201         if (!cdev)
1202                 cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
1203                                                      &dev_id);
1204         if (cdev) {
1205                 /*
1206                  * Schedule moving the device for later, when we have a
1207                  * registered subchannel to move to, and let the probe succeed
1208                  * for now. We can unregister again later, once the probe is through.
1209                  */
1210                 cdev->private->sch = sch;
1211                 PREPARE_WORK(&cdev->private->kick_work,
1212                              ccw_device_move_to_sch);
1213                 queue_work(slow_path_wq, &cdev->private->kick_work);
1214                 return 0;
1215         }
1216         cdev = io_subchannel_create_ccwdev(sch);
1217         if (IS_ERR(cdev)) {
1218                 rc = PTR_ERR(cdev);
1219                 goto out_err;
1220         }
1221         rc = io_subchannel_recog(cdev, sch);
1222         if (rc) {
1223                 spin_lock_irqsave(sch->lock, flags);
1224                 sch_set_cdev(sch, NULL);
1225                 spin_unlock_irqrestore(sch->lock, flags);
1226                 if (cdev->dev.release)
1227                         cdev->dev.release(&cdev->dev);
1228                 goto out_err;
1229         }
1230         return 0;
1231 out_err:
1232         kfree(sch->private);
1233         sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1234         return rc;
1235 }
1236
1237 static int
1238 io_subchannel_remove (struct subchannel *sch)
1239 {
1240         struct ccw_device *cdev;
1241         unsigned long flags;
1242
1243         cdev = sch_get_cdev(sch);
1244         if (!cdev)
1245                 return 0;
1246         /* Set ccw device to not operational and drop reference. */
1247         spin_lock_irqsave(cdev->ccwlock, flags);
1248         sch_set_cdev(sch, NULL);
1249         cdev->private->state = DEV_STATE_NOT_OPER;
1250         spin_unlock_irqrestore(cdev->ccwlock, flags);
1251         ccw_device_unregister(cdev);
1252         put_device(&cdev->dev);
1253         kfree(sch->private);
1254         sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1255         return 0;
1256 }
1257
1258 static int io_subchannel_notify(struct subchannel *sch, int event)
1259 {
1260         struct ccw_device *cdev;
1261
1262         cdev = sch_get_cdev(sch);
1263         if (!cdev)
1264                 return 0;
1265         return ccw_device_notify(cdev, event);
1266 }
1267
1268 static void io_subchannel_verify(struct subchannel *sch)
1269 {
1270         struct ccw_device *cdev;
1271
1272         cdev = sch_get_cdev(sch);
1273         if (cdev)
1274                 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1275 }
1276
1277 static int check_for_io_on_path(struct subchannel *sch, int mask)
1278 {
1279         int cc;
1280
1281         cc = stsch(sch->schid, &sch->schib);
1282         if (cc)
1283                 return 0;
1284         if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
1285                 return 1;
1286         return 0;
1287 }
1288
1289 static void terminate_internal_io(struct subchannel *sch,
1290                                   struct ccw_device *cdev)
1291 {
1292         if (cio_clear(sch)) {
1293                 /* Recheck device in case clear failed. */
1294                 sch->lpm = 0;
1295                 if (cdev->online)
1296                         dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1297                 else
1298                         css_schedule_eval(sch->schid);
1299                 return;
1300         }
1301         cdev->private->state = DEV_STATE_CLEAR_VERIFY;
1302         /* Request retry of internal operation. */
1303         cdev->private->flags.intretry = 1;
1304         /* Call handler. */
1305         if (cdev->handler)
1306                 cdev->handler(cdev, cdev->private->intparm,
1307                               ERR_PTR(-EIO));
1308 }
1309
1310 static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
1311 {
1312         struct ccw_device *cdev;
1313
1314         cdev = sch_get_cdev(sch);
1315         if (!cdev)
1316                 return;
1317         if (check_for_io_on_path(sch, mask)) {
1318                 if (cdev->private->state == DEV_STATE_ONLINE)
1319                         ccw_device_kill_io(cdev);
1320                 else {
1321                         terminate_internal_io(sch, cdev);
1322                         /* Re-start path verification. */
1323                         dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1324                 }
1325         } else
1326                 /* trigger path verification. */
1327                 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1328
1329 }
1330
1331 static int io_subchannel_chp_event(struct subchannel *sch, void *data,
1332                                    int event)
1333 {
1334         int mask;
1335         struct res_acc_data *res_data;
1336
1337         res_data = data;
1338         mask = chp_ssd_get_mask(&sch->ssd_info, res_data);
1339         if (!mask)
1340                 return 0;
1341         switch (event) {
1342         case CHP_VARY_OFF:
1343                 sch->opm &= ~mask;
1344                 sch->lpm &= ~mask;
1345                 io_subchannel_terminate_path(sch, mask);
1346                 break;
1347         case CHP_VARY_ON:
1348                 sch->opm |= mask;
1349                 sch->lpm |= mask;
1350                 io_subchannel_verify(sch);
1351                 break;
1352         case CHP_OFFLINE:
1353                 if (stsch(sch->schid, &sch->schib))
1354                         return -ENXIO;
1355                 if (!css_sch_is_valid(&sch->schib))
1356                         return -ENODEV;
1357                 io_subchannel_terminate_path(sch, mask);
1358                 break;
1359         case CHP_ONLINE:
1360                 if (stsch(sch->schid, &sch->schib))
1361                         return -ENXIO;
1362                 sch->lpm |= mask & sch->opm;
1363                 io_subchannel_verify(sch);
1364                 break;
1365         }
1366         return 0;
1367 }
1368
1369 static void
1370 io_subchannel_shutdown(struct subchannel *sch)
1371 {
1372         struct ccw_device *cdev;
1373         int ret;
1374
1375         cdev = sch_get_cdev(sch);
1376
1377         if (cio_is_console(sch->schid))
1378                 return;
1379         if (!sch->schib.pmcw.ena)
1380                 /* Nothing to do. */
1381                 return;
1382         ret = cio_disable_subchannel(sch);
1383         if (ret != -EBUSY)
1384                 /* Subchannel is disabled, we're done. */
1385                 return;
1386         cdev->private->state = DEV_STATE_QUIESCE;
1387         if (cdev->handler)
1388                 cdev->handler(cdev, cdev->private->intparm,
1389                               ERR_PTR(-EIO));
1390         ret = ccw_device_cancel_halt_clear(cdev);
1391         if (ret == -EBUSY) {
1392                 ccw_device_set_timeout(cdev, HZ/10);
1393                 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
1394         }
1395         cio_disable_subchannel(sch);
1396 }
1397
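/*
 * Classify the current state of a subchannel: CIO_GONE if the subchannel or
 * its device has vanished, CIO_REVALIDATE if a different device number is now
 * visible, CIO_NO_PATH if no usable path remains, CIO_OPER otherwise.
 */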
1398 static int io_subchannel_get_status(struct subchannel *sch)
1399 {
1400         struct schib schib;
1401
1402         if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
1403                 return CIO_GONE;
1404         if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
1405                 return CIO_REVALIDATE;
1406         if (!sch->lpm)
1407                 return CIO_NO_PATH;
1408         return CIO_OPER;
1409 }
1410
1411 static int device_is_disconnected(struct ccw_device *cdev)
1412 {
1413         if (!cdev)
1414                 return 0;
1415         return (cdev->private->state == DEV_STATE_DISCONNECTED ||
1416                 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
1417 }
1418
1419 static int recovery_check(struct device *dev, void *data)
1420 {
1421         struct ccw_device *cdev = to_ccwdev(dev);
1422         int *redo = data;
1423
1424         spin_lock_irq(cdev->ccwlock);
1425         switch (cdev->private->state) {
1426         case DEV_STATE_DISCONNECTED:
1427                 CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
1428                               cdev->private->dev_id.ssid,
1429                               cdev->private->dev_id.devno);
1430                 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1431                 *redo = 1;
1432                 break;
1433         case DEV_STATE_DISCONNECTED_SENSE_ID:
1434                 *redo = 1;
1435                 break;
1436         }
1437         spin_unlock_irq(cdev->ccwlock);
1438
1439         return 0;
1440 }
1441
1442 static void recovery_work_func(struct work_struct *unused)
1443 {
1444         int redo = 0;
1445
1446         bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1447         if (redo) {
1448                 spin_lock_irq(&recovery_lock);
1449                 if (!timer_pending(&recovery_timer)) {
1450                         if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1451                                 recovery_phase++;
1452                         mod_timer(&recovery_timer, jiffies +
1453                                   recovery_delay[recovery_phase] * HZ);
1454                 }
1455                 spin_unlock_irq(&recovery_lock);
1456         } else
1457                 CIO_MSG_EVENT(4, "recovery: end\n");
1458 }
1459
1460 static DECLARE_WORK(recovery_work, recovery_work_func);
1461
1462 static void recovery_func(unsigned long data)
1463 {
1464         /*
1465          * We can't do our recovery in softirq context and it's not
1466          * performance critical, so we schedule it.
1467          */
1468         schedule_work(&recovery_work);
1469 }
1470
1471 static void ccw_device_schedule_recovery(void)
1472 {
1473         unsigned long flags;
1474
1475         CIO_MSG_EVENT(4, "recovery: schedule\n");
1476         spin_lock_irqsave(&recovery_lock, flags);
1477         if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1478                 recovery_phase = 0;
1479                 mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1480         }
1481         spin_unlock_irqrestore(&recovery_lock, flags);
1482 }
1483
1484 static void device_set_disconnected(struct ccw_device *cdev)
1485 {
1486         if (!cdev)
1487                 return;
1488         ccw_device_set_timeout(cdev, 0);
1489         cdev->private->flags.fake_irb = 0;
1490         cdev->private->state = DEV_STATE_DISCONNECTED;
1491         if (cdev->online)
1492                 ccw_device_schedule_recovery();
1493 }
1494
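/*
 * Evaluate an I/O subchannel after an event has been reported for it (e.g.
 * following a machine check). Depending on io_subchannel_get_status() and on
 * whether the device is disconnected, the subchannel is left alone, its
 * device is reprobed in place, or the subchannel is unregistered and
 * optionally probed again.
 */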
1495 static int io_subchannel_sch_event(struct subchannel *sch, int slow)
1496 {
1497         int event, ret, disc;
1498         unsigned long flags;
1499         enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
1500         struct ccw_device *cdev;
1501
1502         spin_lock_irqsave(sch->lock, flags);
1503         cdev = sch_get_cdev(sch);
1504         disc = device_is_disconnected(cdev);
1505         if (disc && slow) {
1506                 /* Disconnected devices are evaluated directly only. */
1507                 spin_unlock_irqrestore(sch->lock, flags);
1508                 return 0;
1509         }
1510         /* No interrupt after machine check - kill pending timers. */
1511         if (cdev)
1512                 ccw_device_set_timeout(cdev, 0);
1513         if (!disc && !slow) {
1514                 /* Non-disconnected devices are evaluated on the slow path. */
1515                 spin_unlock_irqrestore(sch->lock, flags);
1516                 return -EAGAIN;
1517         }
1518         event = io_subchannel_get_status(sch);
1519         CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
1520                       sch->schid.ssid, sch->schid.sch_no, event,
1521                       disc ? "disconnected" : "normal",
1522                       slow ? "slow" : "fast");
1523         /* Analyze subchannel status. */
1524         action = NONE;
1525         switch (event) {
1526         case CIO_NO_PATH:
1527                 if (disc) {
1528                         /* Check if paths have become available. */
1529                         action = REPROBE;
1530                         break;
1531                 }
1532                 /* fall through */
1533         case CIO_GONE:
1534                 /* Prevent unwanted effects while the lock is dropped below. */
1535                 cio_disable_subchannel(sch);
1536                 device_set_disconnected(cdev);
1537                 /* Ask driver what to do with device. */
1538                 action = UNREGISTER;
1539                 spin_unlock_irqrestore(sch->lock, flags);
1540                 ret = io_subchannel_notify(sch, event);
1541                 spin_lock_irqsave(sch->lock, flags);
1542                 if (ret)
1543                         action = NONE;
1544                 break;
1545         case CIO_REVALIDATE:
1546                 /* Device will be removed, so no notify necessary. */
1547                 if (disc)
1548                         /* Reprobe because immediate unregister might block. */
1549                         action = REPROBE;
1550                 else
1551                         action = UNREGISTER_PROBE;
1552                 break;
1553         case CIO_OPER:
1554                 if (disc)
1555                         /* Get device operational again. */
1556                         action = REPROBE;
1557                 break;
1558         }
1559         /* Perform action. */
1560         ret = 0;
1561         switch (action) {
1562         case UNREGISTER:
1563         case UNREGISTER_PROBE:
1564                 /* Unregister device (will use subchannel lock). */
1565                 spin_unlock_irqrestore(sch->lock, flags);
1566                 css_sch_device_unregister(sch);
1567                 spin_lock_irqsave(sch->lock, flags);
1568
1569                 /* Reset intparm to zeroes. */
1570                 sch->schib.pmcw.intparm = 0;
1571                 cio_modify(sch);
1572                 break;
1573         case REPROBE:
1574                 ccw_device_trigger_reprobe(cdev);
1575                 break;
1576         default:
1577                 break;
1578         }
1579         spin_unlock_irqrestore(sch->lock, flags);
1580         /* Probe if necessary. */
1581         if (action == UNREGISTER_PROBE)
1582                 ret = css_probe_device(sch->schid);
1583
1584         return ret;
1585 }
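
/*
 * Editor's note -- the event/action decisions above, restated for reference
 * (derived only from the switch statements in io_subchannel_sch_event()):
 *
 *   CIO_NO_PATH:    REPROBE if the device was disconnected, otherwise
 *                   handled like CIO_GONE (fall through).
 *   CIO_GONE:       mark the device disconnected and UNREGISTER, unless the
 *                   driver's notify callback accepts the event (then NONE).
 *   CIO_REVALIDATE: REPROBE if disconnected, else UNREGISTER_PROBE.
 *   CIO_OPER:       REPROBE if disconnected, else no action.
 *
 * UNREGISTER and UNREGISTER_PROBE also clear the subchannel's intparm;
 * UNREGISTER_PROBE additionally calls css_probe_device() once the lock has
 * been dropped.
 */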
1586
1587 #ifdef CONFIG_CCW_CONSOLE
1588 static struct ccw_device console_cdev;
1589 static struct ccw_device_private console_private;
1590 static int console_cdev_in_use;
1591
1592 static DEFINE_SPINLOCK(ccw_console_lock);
1593
1594 spinlock_t *cio_get_console_lock(void)
1595 {
1596         return &ccw_console_lock;
1597 }
1598
1599 static int ccw_device_console_enable(struct ccw_device *cdev,
1600                                      struct subchannel *sch)
1601 {
1602         int rc;
1603
1604         /* Attach subchannel private data. */
1605         sch->private = cio_get_console_priv();
1606         memset(sch->private, 0, sizeof(struct io_subchannel_private));
1607         io_subchannel_init_fields(sch);
1608         sch->driver = &io_subchannel_driver;
1609         /* Initialize the ccw_device structure. */
1610         cdev->dev.parent = &sch->dev;
1611         rc = io_subchannel_recog(cdev, sch);
1612         if (rc)
1613                 return rc;
1614
1615         /* Now wait for the async. recognition to come to an end. */
1616         spin_lock_irq(cdev->ccwlock);
1617         while (!dev_fsm_final_state(cdev))
1618                 wait_cons_dev();
1619         rc = -EIO;
1620         if (cdev->private->state != DEV_STATE_OFFLINE)
1621                 goto out_unlock;
1622         ccw_device_online(cdev);
1623         while (!dev_fsm_final_state(cdev))
1624                 wait_cons_dev();
1625         if (cdev->private->state != DEV_STATE_ONLINE)
1626                 goto out_unlock;
1627         rc = 0;
1628 out_unlock:
1629         spin_unlock_irq(cdev->ccwlock);
1630         return rc;
1631 }
1632
1633 struct ccw_device *
1634 ccw_device_probe_console(void)
1635 {
1636         struct subchannel *sch;
1637         int ret;
1638
1639         if (xchg(&console_cdev_in_use, 1) != 0)
1640                 return ERR_PTR(-EBUSY);
1641         sch = cio_probe_console();
1642         if (IS_ERR(sch)) {
1643                 console_cdev_in_use = 0;
1644                 return (void *) sch;
1645         }
1646         memset(&console_cdev, 0, sizeof(struct ccw_device));
1647         memset(&console_private, 0, sizeof(struct ccw_device_private));
1648         console_cdev.private = &console_private;
1649         console_private.cdev = &console_cdev;
1650         ret = ccw_device_console_enable(&console_cdev, sch);
1651         if (ret) {
1652                 cio_release_console();
1653                 console_cdev_in_use = 0;
1654                 return ERR_PTR(ret);
1655         }
1656         console_cdev.online = 1;
1657         return &console_cdev;
1658 }
1659 #endif
1660
1661 /*
1662  * get ccw_device matching the busid, but only if owned by cdrv
1663  */
1664 static int
1665 __ccwdev_check_busid(struct device *dev, void *id)
1666 {
1667         char *bus_id;
1668
1669         bus_id = id;
1670
1671         return (strncmp(bus_id, dev->bus_id, BUS_ID_SIZE) == 0);
1672 }
1673
1674
1675 /**
1676  * get_ccwdev_by_busid() - obtain device from a bus id
1677  * @cdrv: driver the device is owned by
1678  * @bus_id: bus id of the device to be searched
1679  *
1680  * This function searches all devices owned by @cdrv for a device with a bus
1681  * id matching @bus_id.
1682  * Returns:
1683  *  If a match is found, the reference count of the found device is increased
1684  *  and the device is returned; else %NULL is returned.
1685  */
1686 struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
1687                                        const char *bus_id)
1688 {
1689         struct device *dev;
1690         struct device_driver *drv;
1691
1692         drv = get_driver(&cdrv->driver);
1693         if (!drv)
1694                 return NULL;
1695
1696         dev = driver_find_device(drv, NULL, (void *)bus_id,
1697                                  __ccwdev_check_busid);
1698         put_driver(drv);
1699
1700         return dev ? to_ccwdev(dev) : NULL;
1701 }
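
/*
 * Editor's note -- hedged usage sketch (compiled out) for the lookup above:
 * the caller must drop the reference that get_ccwdev_by_busid() takes.  The
 * driver pointer and the bus id string are invented placeholders.
 */
#if 0
static void example_lookup(struct ccw_driver *my_ccw_driver)
{
        struct ccw_device *cdev;

        cdev = get_ccwdev_by_busid(my_ccw_driver, "0.0.1234");
        if (!cdev)
                return;
        /* ... use the device ... */
        put_device(&cdev->dev); /* drop the reference taken by the lookup */
}
#endif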
1702
1703 /************************** device driver handling ************************/
1704
1705 /* This is the implementation of the ccw_driver class. The probe, remove
1706  * and release methods are very similar to the device_driver
1707  * implementations, with the difference that they have ccw_device
1708  * arguments.
1709  *
1710  * A ccw driver also contains the information that is needed for
1711  * device matching.
1712  */
1713 static int
1714 ccw_device_probe (struct device *dev)
1715 {
1716         struct ccw_device *cdev = to_ccwdev(dev);
1717         struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
1718         int ret;
1719
1720         cdev->drv = cdrv; /* to let the driver call _set_online */
1721
1722         ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
1723
1724         if (ret) {
1725                 cdev->drv = NULL;
1726                 return ret;
1727         }
1728
1729         return 0;
1730 }
1731
1732 static int
1733 ccw_device_remove (struct device *dev)
1734 {
1735         struct ccw_device *cdev = to_ccwdev(dev);
1736         struct ccw_driver *cdrv = cdev->drv;
1737         int ret;
1738
1739         if (cdrv->remove)
1740                 cdrv->remove(cdev);
1741         if (cdev->online) {
1742                 cdev->online = 0;
1743                 spin_lock_irq(cdev->ccwlock);
1744                 ret = ccw_device_offline(cdev);
1745                 spin_unlock_irq(cdev->ccwlock);
1746                 if (ret == 0)
1747                         wait_event(cdev->private->wait_q,
1748                                    dev_fsm_final_state(cdev));
1749                 else
1750                         CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
1751                                       "device 0.%x.%04x\n",
1752                                       ret, cdev->private->dev_id.ssid,
1753                                       cdev->private->dev_id.devno);
1754         }
1755         ccw_device_set_timeout(cdev, 0);
1756         cdev->drv = NULL;
1757         return 0;
1758 }
1759
1760 static void ccw_device_shutdown(struct device *dev)
1761 {
1762         struct ccw_device *cdev;
1763
1764         cdev = to_ccwdev(dev);
1765         if (cdev->drv && cdev->drv->shutdown)
1766                 cdev->drv->shutdown(cdev);
1767         disable_cmf(cdev);
1768 }
1769
1770 struct bus_type ccw_bus_type = {
1771         .name   = "ccw",
1772         .match  = ccw_bus_match,
1773         .uevent = ccw_uevent,
1774         .probe  = ccw_device_probe,
1775         .remove = ccw_device_remove,
1776         .shutdown = ccw_device_shutdown,
1777 };
1778
1779 /**
1780  * ccw_driver_register() - register a ccw driver
1781  * @cdriver: driver to be registered
1782  *
1783  * This function is mainly a wrapper around driver_register().
1784  * Returns:
1785  *   %0 on success and a negative error value on failure.
1786  */
1787 int ccw_driver_register(struct ccw_driver *cdriver)
1788 {
1789         struct device_driver *drv = &cdriver->driver;
1790
1791         drv->bus = &ccw_bus_type;
1792         drv->name = cdriver->name;
1793         drv->owner = cdriver->owner;
1794
1795         return driver_register(drv);
1796 }
1797
1798 /**
1799  * ccw_driver_unregister() - deregister a ccw driver
1800  * @cdriver: driver to be deregistered
1801  *
1802  * This function is mainly a wrapper around driver_unregister().
1803  */
1804 void ccw_driver_unregister(struct ccw_driver *cdriver)
1805 {
1806         driver_unregister(&cdriver->driver);
1807 }
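
/*
 * Editor's note -- hedged sketch (compiled out) of a minimal ccw driver
 * module built on ccw_driver_register()/ccw_driver_unregister() above.  The
 * device id values, driver name and callbacks are invented placeholders;
 * only the registration pattern comes from this file.
 */
#if 0
#include <linux/module.h>
#include <linux/init.h>
#include <asm/ccwdev.h>

static struct ccw_device_id example_ids[] = {
        { CCW_DEVICE(0x1234, 0x01) },   /* made-up control unit type/model */
        { },                            /* terminating entry */
};
MODULE_DEVICE_TABLE(ccw, example_ids);

static int example_probe(struct ccw_device *cdev)
{
        return 0;       /* accept the device */
}

static void example_remove(struct ccw_device *cdev)
{
}

static struct ccw_driver example_driver = {
        .owner  = THIS_MODULE,
        .name   = "example_ccw",
        .ids    = example_ids,
        .probe  = example_probe,
        .remove = example_remove,
};

static int __init example_init(void)
{
        return ccw_driver_register(&example_driver);
}

static void __exit example_exit(void)
{
        ccw_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
#endif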
1808
1809 /* Helper func for qdio. */
1810 struct subchannel_id
1811 ccw_device_get_subchannel_id(struct ccw_device *cdev)
1812 {
1813         struct subchannel *sch;
1814
1815         sch = to_subchannel(cdev->dev.parent);
1816         return sch->schid;
1817 }
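
/*
 * Editor's note -- hedged usage sketch: a caller such as qdio can log the
 * subchannel id returned above using the format this file already uses in
 * io_subchannel_sch_event(); cdev is assumed to be a valid, registered
 * struct ccw_device:
 *
 *        struct subchannel_id schid = ccw_device_get_subchannel_id(cdev);
 *
 *        printk(KERN_INFO "subchannel 0.%x.%04x\n", schid.ssid, schid.sch_no);
 */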
1818
1819 MODULE_LICENSE("GPL");
1820 EXPORT_SYMBOL(ccw_device_set_online);
1821 EXPORT_SYMBOL(ccw_device_set_offline);
1822 EXPORT_SYMBOL(ccw_driver_register);
1823 EXPORT_SYMBOL(ccw_driver_unregister);
1824 EXPORT_SYMBOL(get_ccwdev_by_busid);
1825 EXPORT_SYMBOL(ccw_bus_type);
1826 EXPORT_SYMBOL(ccw_device_work);
1827 EXPORT_SYMBOL(ccw_device_notify_work);
1828 EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);