Merge commit 'v2.6.28-rc2' into core/locking
[linux-2.6] / drivers / s390 / cio / device_fsm.c
1 /*
2  * drivers/s390/cio/device_fsm.c
3  * finite state machine for device handling
4  *
5  *    Copyright IBM Corp. 2002,2008
6  *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
7  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
8  */
9
10 #include <linux/module.h>
11 #include <linux/init.h>
12 #include <linux/jiffies.h>
13 #include <linux/string.h>
14
15 #include <asm/ccwdev.h>
16 #include <asm/cio.h>
17 #include <asm/chpid.h>
18
19 #include "cio.h"
20 #include "cio_debug.h"
21 #include "css.h"
22 #include "device.h"
23 #include "chsc.h"
24 #include "ioasm.h"
25 #include "chp.h"
26
/* Set via the "ccw_timeout_log" kernel parameter; enables the verbose
 * state dump in ccw_timeout_log() when a ccw device timeout fires. */
static int timeout_log_enabled;

/*
 * Kernel command line hook for "ccw_timeout_log": switches timeout
 * logging on.  Returns 1 to mark the option as consumed.
 */
static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;
}

__setup("ccw_timeout_log", ccw_timeout_log_setup);
36
/*
 * Dump diagnostic state for a ccw device whose timeout fired: the orb,
 * the last channel program (command mode) or tcw (transport mode), the
 * current schib and the device flags.  Only called when the
 * "ccw_timeout_log" kernel parameter was given.
 */
static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	union orb *orb;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	orb = &private->orb;
	/* Fetch a fresh subchannel information block; cc is reported below. */
	cc = stsch(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
	       "device information:\n", get_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       orb, sizeof(*orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n",
	       dev_name(&cdev->dev));
	printk(KERN_WARNING "cio: subchannel bus id: %s\n",
	       dev_name(&sch->dev));
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

	if (orb->tm.b) {
		printk(KERN_WARNING "cio: orb indicates transport mode\n");
		printk(KERN_WARNING "cio: last tcw:\n");
		print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->tm.tcw,
			       sizeof(struct tcw), 0);
	} else {
		printk(KERN_WARNING "cio: orb indicates command mode\n");
		/* Distinguish internally generated channel programs
		 * (sense ccw / internal ccws) from driver-supplied ones. */
		if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
		    (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
			printk(KERN_WARNING "cio: last channel program "
			       "(intern):\n");
		else
			printk(KERN_WARNING "cio: last channel program:\n");

		print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->cmd.cpa,
			       sizeof(struct ccw1), 0);
	}
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}
91
92 /*
93  * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
94  */
95 static void
96 ccw_device_timeout(unsigned long data)
97 {
98         struct ccw_device *cdev;
99
100         cdev = (struct ccw_device *) data;
101         spin_lock_irq(cdev->ccwlock);
102         if (timeout_log_enabled)
103                 ccw_timeout_log(cdev);
104         dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
105         spin_unlock_irq(cdev->ccwlock);
106 }
107
/*
 * Arm or disarm the per-device timeout timer.
 *
 * expires == 0 cancels a pending timeout.  For a non-zero value, a
 * pending timer is first pushed forward with mod_timer(); mod_timer()
 * returns non-zero when it modified an active timer, in which case we
 * are done.  Otherwise the timer is (re)initialized and added.
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	/* Timer was not active - set it up from scratch. */
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}
127
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, up to three
 * tries with cio_halt, 255 tries with cio_clear. If everything fails
 * panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	ret = stsch(sch->schid, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		/* Subchannel gone or device number invalid. */
		return -ENODEV;
	if (!sch->schib.pmcw.ena)
		/* Not operational -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
		if (!scsw_is_tm(&sch->schib.scsw)) {
			ret = cio_cancel(sch);
			if (ret != -EINVAL)
				return ret;
		}
		/* cancel io unsuccessful or not applicable (transport mode).
		 * Continue with asynchronous instructions. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			if (ret != -EBUSY)
				/* Halt initiated: interrupt to follow. */
				return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear (sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	/* All retries exhausted - i/o cannot be stopped. */
	panic("Can't stop i/o on subchannel.\n");
}
180
181 static int
182 ccw_device_handle_oper(struct ccw_device *cdev)
183 {
184         struct subchannel *sch;
185
186         sch = to_subchannel(cdev->dev.parent);
187         cdev->private->flags.recog_done = 1;
188         /*
189          * Check if cu type and device type still match. If
190          * not, it is certainly another device and we have to
191          * de- and re-register.
192          */
193         if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
194             cdev->id.cu_model != cdev->private->senseid.cu_model ||
195             cdev->id.dev_type != cdev->private->senseid.dev_type ||
196             cdev->id.dev_model != cdev->private->senseid.dev_model) {
197                 PREPARE_WORK(&cdev->private->kick_work,
198                              ccw_device_do_unreg_rereg);
199                 queue_work(ccw_device_work, &cdev->private->kick_work);
200                 return 0;
201         }
202         cdev->private->flags.donotify = 1;
203         return 1;
204 }
205
206 /*
207  * The machine won't give us any notification by machine check if a chpid has
208  * been varied online on the SE so we have to find out by magic (i. e. driving
209  * the channel subsystem to device selection and updating our path masks).
210  */
211 static void
212 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
213 {
214         int mask, i;
215         struct chp_id chpid;
216
217         chp_id_init(&chpid);
218         for (i = 0; i<8; i++) {
219                 mask = 0x80 >> i;
220                 if (!(sch->lpm & mask))
221                         continue;
222                 if (old_lpm & mask)
223                         continue;
224                 chpid.id = sch->schib.pmcw.chpid[i];
225                 if (!chp_is_registered(chpid))
226                         css_schedule_eval_all();
227         }
228 }
229
230 /*
231  * Stop device recognition.
232  */
233 static void
234 ccw_device_recog_done(struct ccw_device *cdev, int state)
235 {
236         struct subchannel *sch;
237         int notify, old_lpm, same_dev;
238
239         sch = to_subchannel(cdev->dev.parent);
240
241         ccw_device_set_timeout(cdev, 0);
242         cio_disable_subchannel(sch);
243         /*
244          * Now that we tried recognition, we have performed device selection
245          * through ssch() and the path information is up to date.
246          */
247         old_lpm = sch->lpm;
248         stsch(sch->schid, &sch->schib);
249         sch->lpm = sch->schib.pmcw.pam & sch->opm;
250         /* Check since device may again have become not operational. */
251         if (!sch->schib.pmcw.dnv)
252                 state = DEV_STATE_NOT_OPER;
253         if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
254                 /* Force reprobe on all chpids. */
255                 old_lpm = 0;
256         if (sch->lpm != old_lpm)
257                 __recover_lost_chpids(sch, old_lpm);
258         if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
259                 if (state == DEV_STATE_NOT_OPER) {
260                         cdev->private->flags.recog_done = 1;
261                         cdev->private->state = DEV_STATE_DISCONNECTED;
262                         return;
263                 }
264                 /* Boxed devices don't need extra treatment. */
265         }
266         notify = 0;
267         same_dev = 0; /* Keep the compiler quiet... */
268         switch (state) {
269         case DEV_STATE_NOT_OPER:
270                 CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
271                               "subchannel 0.%x.%04x\n",
272                               cdev->private->dev_id.devno,
273                               sch->schid.ssid, sch->schid.sch_no);
274                 break;
275         case DEV_STATE_OFFLINE:
276                 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
277                         same_dev = ccw_device_handle_oper(cdev);
278                         notify = 1;
279                 }
280                 /* fill out sense information */
281                 memset(&cdev->id, 0, sizeof(cdev->id));
282                 cdev->id.cu_type   = cdev->private->senseid.cu_type;
283                 cdev->id.cu_model  = cdev->private->senseid.cu_model;
284                 cdev->id.dev_type  = cdev->private->senseid.dev_type;
285                 cdev->id.dev_model = cdev->private->senseid.dev_model;
286                 if (notify) {
287                         cdev->private->state = DEV_STATE_OFFLINE;
288                         if (same_dev) {
289                                 /* Get device online again. */
290                                 ccw_device_online(cdev);
291                                 wake_up(&cdev->private->wait_q);
292                         }
293                         return;
294                 }
295                 /* Issue device info message. */
296                 CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
297                               "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
298                               "%04X/%02X\n",
299                               cdev->private->dev_id.ssid,
300                               cdev->private->dev_id.devno,
301                               cdev->id.cu_type, cdev->id.cu_model,
302                               cdev->id.dev_type, cdev->id.dev_model);
303                 break;
304         case DEV_STATE_BOXED:
305                 CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
306                               " subchannel 0.%x.%04x\n",
307                               cdev->private->dev_id.devno,
308                               sch->schid.ssid, sch->schid.sch_no);
309                 break;
310         }
311         cdev->private->state = state;
312         io_subchannel_recog_done(cdev);
313         if (state != DEV_STATE_NOT_OPER)
314                 wake_up(&cdev->private->wait_q);
315 }
316
317 /*
318  * Function called from device_id.c after sense id has completed.
319  */
320 void
321 ccw_device_sense_id_done(struct ccw_device *cdev, int err)
322 {
323         switch (err) {
324         case 0:
325                 ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
326                 break;
327         case -ETIME:            /* Sense id stopped by timeout. */
328                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
329                 break;
330         default:
331                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
332                 break;
333         }
334 }
335
336 int ccw_device_notify(struct ccw_device *cdev, int event)
337 {
338         if (!cdev->drv)
339                 return 0;
340         if (!cdev->online)
341                 return 0;
342         CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
343                       cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
344                       event);
345         return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
346 }
347
348 static void cmf_reenable_delayed(struct work_struct *work)
349 {
350         struct ccw_device_private *priv;
351         struct ccw_device *cdev;
352
353         priv = container_of(work, struct ccw_device_private, kick_work);
354         cdev = priv->cdev;
355         cmf_reenable(cdev);
356 }
357
358 static void ccw_device_oper_notify(struct ccw_device *cdev)
359 {
360         if (ccw_device_notify(cdev, CIO_OPER)) {
361                 /* Reenable channel measurements, if needed. */
362                 PREPARE_WORK(&cdev->private->kick_work, cmf_reenable_delayed);
363                 queue_work(ccw_device_work, &cdev->private->kick_work);
364                 return;
365         }
366         /* Driver doesn't want device back. */
367         ccw_device_set_notoper(cdev);
368         PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unreg_rereg);
369         queue_work(ccw_device_work, &cdev->private->kick_work);
370 }
371
/*
 * Finished with online/offline processing: stop the timeout timer,
 * disable the subchannel (unless going online), reset accumulated
 * status, enter the new state and wake up any waiters.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;


	if (state == DEV_STATE_BOXED)
		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);

	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		ccw_device_oper_notify(cdev);
	}
	wake_up(&cdev->private->wait_q);

	/* Drop the reference taken in ccw_device_online() when we did
	 * not end up online. */
	if (css_init_done && state != DEV_STATE_ONLINE)
		put_device (&cdev->dev);
}
406
407 static int cmp_pgid(struct pgid *p1, struct pgid *p2)
408 {
409         char *c1;
410         char *c2;
411
412         c1 = (char *)p1;
413         c2 = (char *)p2;
414
415         return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
416 }
417
/*
 * Condense the path group ids sensed on the individual channel paths
 * into a single pgid in slot 0.  If two non-reset pgids differ, path
 * grouping is disabled for the device; if none was found at all, the
 * global pgid of channel subsystem 0 is used instead.
 */
static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
{
	int i;
	int last;	/* index of the first non-reset pgid seen so far */

	last = 0;
	for (i = 0; i < 8; i++) {
		if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
			/* No PGID yet */
			continue;
		if (cdev->private->pgid[last].inf.ps.state1 ==
		    SNID_STATE1_RESET) {
			/* First non-zero PGID */
			last = i;
			continue;
		}
		if (cmp_pgid(&cdev->private->pgid[i],
			     &cdev->private->pgid[last]) == 0)
			/* Non-conflicting PGIDs */
			continue;

		/* PGID mismatch, can't pathgroup. */
		CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
			      "0.%x.%04x, can't pathgroup\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		cdev->private->options.pgroup = 0;
		return;
	}
	if (cdev->private->pgid[last].inf.ps.state1 ==
	    SNID_STATE1_RESET)
		/* No previous pgid found */
		memcpy(&cdev->private->pgid[0],
		       &channel_subsystems[0]->global_pgid,
		       sizeof(struct pgid));
	else
		/* Use existing pgid */
		memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
		       sizeof(struct pgid));
}
458
459 /*
460  * Function called from device_pgid.c after sense path ground has completed.
461  */
462 void
463 ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
464 {
465         struct subchannel *sch;
466
467         sch = to_subchannel(cdev->dev.parent);
468         switch (err) {
469         case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
470                 cdev->private->options.pgroup = 0;
471                 break;
472         case 0: /* success */
473         case -EACCES: /* partial success, some paths not operational */
474                 /* Check if all pgids are equal or 0. */
475                 __ccw_device_get_common_pgid(cdev);
476                 break;
477         case -ETIME:            /* Sense path group id stopped by timeout. */
478         case -EUSERS:           /* device is reserved for someone else. */
479                 ccw_device_done(cdev, DEV_STATE_BOXED);
480                 return;
481         default:
482                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
483                 return;
484         }
485         /* Start Path Group verification. */
486         cdev->private->state = DEV_STATE_VERIFY;
487         cdev->private->flags.doverify = 0;
488         ccw_device_verify_start(cdev);
489 }
490
/*
 * Start device recognition.
 *
 * Returns 0 on success, -EINVAL when called in the wrong state, or the
 * error of cio_enable_subchannel() otherwise.
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	if (ret != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return ret;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after an
	 * timeout (or if sense pgid during path verification detects the device
	 * is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	ccw_device_sense_id_start(cdev);
	return 0;
}
525
526 /*
527  * Handle timeout in device recognition.
528  */
529 static void
530 ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
531 {
532         int ret;
533
534         ret = ccw_device_cancel_halt_clear(cdev);
535         switch (ret) {
536         case 0:
537                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
538                 break;
539         case -ENODEV:
540                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
541                 break;
542         default:
543                 ccw_device_set_timeout(cdev, 3*HZ);
544         }
545 }
546
547
/*
 * Callback from device_pgid.c after path verification has finished.
 * Updates the schib/lpm, possibly repeats verification, and moves the
 * device into its final state according to err.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	stsch(sch->schid, &sch->schib);
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		cdev->private->flags.doverify = 0;
		ccw_device_verify_start(cdev);
		return;
	}
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		/* fall through - device is still usable. */
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			memset(&cdev->private->irb, 0, sizeof(struct irb));
			cdev->private->irb.scsw.cmd.cc = 1;
			cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
			cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
			cdev->private->irb.scsw.cmd.stctl =
				SCSW_STCTL_STATUS_PEND;
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		break;
	case -ETIME:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		if (cdev->online) {
			ccw_device_set_timeout(cdev, 0);
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
600
/*
 * Get device online.
 *
 * Returns 0 on success, -EINVAL when called in the wrong state,
 * -ENODEV when no device reference could be taken, or the error of
 * cio_enable_subchannel() otherwise.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	/* Hold a device reference for the online processing; dropped in
	 * ccw_device_done() when we do not end up online. */
	if (css_init_done && !get_device(&cdev->dev))
		return -ENODEV;
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* Start initial path verification. */
		cdev->private->state = DEV_STATE_VERIFY;
		cdev->private->flags.doverify = 0;
		ccw_device_verify_start(cdev);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}
636
637 void
638 ccw_device_disband_done(struct ccw_device *cdev, int err)
639 {
640         switch (err) {
641         case 0:
642                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
643                 break;
644         case -ETIME:
645                 ccw_device_done(cdev, DEV_STATE_BOXED);
646                 break;
647         default:
648                 cdev->private->flags.donotify = 0;
649                 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
650                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
651                 break;
652         }
653 }
654
/*
 * Shutdown device.
 *
 * Returns 0 when offline processing was started (or nothing had to be
 * done), -ENODEV when the subchannel is gone, -EBUSY while i/o is
 * still active and -EINVAL for a device that is not online.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Allow ccw_device_offline while disconnected. */
	if (cdev->private->state == DEV_STATE_DISCONNECTED ||
	    cdev->private->state == DEV_STATE_NOT_OPER) {
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return 0;
	}
	if (ccw_device_is_orphan(cdev)) {
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (scsw_actl(&sch->schib.scsw) != 0)
		/* Subchannel is still busy with i/o. */
		return -EBUSY;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;
	/* Are we doing path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}
692
693 /*
694  * Handle timeout in device online/offline process.
695  */
696 static void
697 ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
698 {
699         int ret;
700
701         ret = ccw_device_cancel_halt_clear(cdev);
702         switch (ret) {
703         case 0:
704                 ccw_device_done(cdev, DEV_STATE_BOXED);
705                 break;
706         case -ENODEV:
707                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
708                 break;
709         default:
710                 ccw_device_set_timeout(cdev, 3*HZ);
711         }
712 }
713
/*
 * Handle not oper event in device recognition: abort recognition with
 * the not-operational result.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
722
723 /*
724  * Handle not operational event in non-special state.
725  */
726 static void ccw_device_generic_notoper(struct ccw_device *cdev,
727                                        enum dev_event dev_event)
728 {
729         struct subchannel *sch;
730
731         cdev->private->state = DEV_STATE_NOT_OPER;
732         sch = to_subchannel(cdev->dev.parent);
733         css_schedule_eval(sch->schid);
734 }
735
/*
 * Handle path verification event: start verification if the device is
 * idle, otherwise flag it to be done once the current i/o / sense has
 * completed.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		/* Postpone until the basic sense is done. */
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	stsch(sch->schid, &sch->schib);

	if (scsw_actl(&sch->schib.scsw) != 0 ||
	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
	    (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	cdev->private->flags.doverify = 0;
	ccw_device_verify_start(cdev);
}
771
/*
 * Got an interrupt for a normal io (state online).
 *
 * Unsolicited interrupts with a unit check but no concurrent sense
 * trigger a basic sense; otherwise status is accumulated, a basic
 * sense is started if needed, and finally the driver handler is
 * called (possibly followed by delayed path verification).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	int is_cmd;

	/* The irb for the current interrupt lives in the lowcore. */
	irb = (struct irb *) __LC_IRB;
	is_cmd = !scsw_is_tm(&irb->scsw);
	/* Check for unsolicited interrupt. */
	if (!scsw_is_solicited(&irb->scsw)) {
		if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (is_cmd && cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
815
/*
 * Got an timeout in online state: try to terminate the running i/o.
 * While halt/clear is still pending (-EBUSY), re-arm the timer and
 * wait in DEV_STATE_TIMEOUT_KILL; a vanished device raises a not-oper
 * event, otherwise the driver gets -ETIMEDOUT.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV)
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
837
838 /*
839  * Got an interrupt for a basic sense.
840  *
 * Merge the sense data into the accumulated irb, restart basic sense if
 * more data is needed, and finally return to state ONLINE and call the
 * driver's handler.
841  */
842 static void
843 ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
844 {
845         struct irb *irb;
846
847         irb = (struct irb *) __LC_IRB;
848         /* Check for unsolicited interrupt. */
849         if (scsw_stctl(&irb->scsw) ==
850             (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
851                 if (scsw_cc(&irb->scsw) == 1)
852                         /* Basic sense hasn't started. Try again. */
853                         ccw_device_do_sense(cdev, irb);
854                 else {
855                         CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
856                                       "interrupt during w4sense...\n",
857                                       cdev->private->dev_id.ssid,
858                                       cdev->private->dev_id.devno);
859                         if (cdev->handler)
860                                 cdev->handler (cdev, 0, irb);
861                 }
862                 return;
863         }
864         /*
865          * Check if a halt or clear has been issued in the meanwhile. If yes,
866          * only deliver the halt/clear interrupt to the device driver as if it
867          * had killed the original request.
868          */
869         if (scsw_fctl(&irb->scsw) &
870             (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
871                 /* Retry Basic Sense if requested. */
872                 if (cdev->private->flags.intretry) {
873                         cdev->private->flags.intretry = 0;
874                         ccw_device_do_sense(cdev, irb);
875                         return;
876                 }
                /* Drop accumulated status; report only the halt/clear. */
877                 cdev->private->flags.dosense = 0;
878                 memset(&cdev->private->irb, 0, sizeof(struct irb));
879                 ccw_device_accumulate_irb(cdev, irb);
880                 goto call_handler;
881         }
882         /* Add basic sense info to irb. */
883         ccw_device_accumulate_basic_sense(cdev, irb);
884         if (cdev->private->flags.dosense) {
885                 /* Another basic sense is needed. */
886                 ccw_device_do_sense(cdev, irb);
887                 return;
888         }
889 call_handler:
890         cdev->private->state = DEV_STATE_ONLINE;
891         /* Call the handler. */
892         if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
893                 /* Start delayed path verification. */
894                 ccw_device_online_verify(cdev, 0);
895 }
895
/*
 * Interrupt in state CLEAR_VERIFY: a cio-initiated clear completed.
 * Record the status and start the delayed path verification.
 */
896 static void
897 ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
898 {
899         struct irb *irb;
900
901         irb = (struct irb *) __LC_IRB;
902         /* Accumulate status. We don't do basic sense. */
903         ccw_device_accumulate_irb(cdev, irb);
904         /* Remember to clear irb to avoid residuals. */
905         memset(&cdev->private->irb, 0, sizeof(struct irb));
906         /* Try to start delayed device verification. */
907         ccw_device_online_verify(cdev, 0);
908         /* Note: Don't call handler for cio initiated clear! */
909 }
910
/*
 * Interrupt in state TIMEOUT_KILL: the cancel/halt/clear for the
 * timed-out I/O has completed.  Stop the retry timer, start delayed
 * path verification and report -EIO to the driver.
 */
911 static void
912 ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
913 {
        /* The former local subchannel lookup was unused and is gone. */
917         ccw_device_set_timeout(cdev, 0);
918         /* Start delayed path verification. */
919         ccw_device_online_verify(cdev, 0);
920         /* OK, i/o is dead now. Call interrupt handler. */
921         if (cdev->handler)
922                 cdev->handler(cdev, cdev->private->intparm,
923                               ERR_PTR(-EIO));
924 }
925
/*
 * Timeout in state TIMEOUT_KILL: the previous kill attempt did not
 * complete yet.  Retry cancel/halt/clear; on continued -EBUSY re-arm
 * the 3s timer, otherwise finish as in ccw_device_kill_io().
 */
926 static void
927 ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
928 {
929         int ret;
930
931         ret = ccw_device_cancel_halt_clear(cdev);
932         if (ret == -EBUSY) {
933                 ccw_device_set_timeout(cdev, 3*HZ);
934                 return;
935         }
936         /* Start delayed path verification. */
937         ccw_device_online_verify(cdev, 0);
938         if (cdev->handler)
939                 cdev->handler(cdev, cdev->private->intparm,
940                               ERR_PTR(-EIO));
941 }
942
/*
 * Kill any running I/O on the device via cancel/halt/clear.  If the
 * subchannel is still busy, defer to state TIMEOUT_KILL and retry in
 * 3 seconds; otherwise start delayed path verification and report
 * -EIO to the driver.
 */
943 void ccw_device_kill_io(struct ccw_device *cdev)
944 {
945         int ret;
946
947         ret = ccw_device_cancel_halt_clear(cdev);
948         if (ret == -EBUSY) {
949                 ccw_device_set_timeout(cdev, 3*HZ);
950                 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
951                 return;
952         }
953         /* Start delayed path verification. */
954         ccw_device_online_verify(cdev, 0);
955         if (cdev->handler)
956                 cdev->handler(cdev, cdev->private->intparm,
957                               ERR_PTR(-EIO));
958 }
959
/*
 * A verify event arrived while another task is in progress: just flag
 * it; the current task's completion path checks flags.doverify and
 * runs ccw_device_online_verify() then.
 */
960 static void
961 ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
962 {
963         /* Start verification after current task finished. */
964         cdev->private->flags.doverify = 1;
965 }
966
/*
 * Interrupt or timeout for the stlck (steal lock) operation on a boxed
 * device.  Collect the status (unless unsolicited) and wake up the
 * task sleeping on the device's wait queue.
 */
967 static void
968 ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
969 {
970         struct irb *irb;
971
972         switch (dev_event) {
973         case DEV_EVENT_INTERRUPT:
974                 irb = (struct irb *) __LC_IRB;
975                 /* Check for unsolicited interrupt. */
976                 if ((scsw_stctl(&irb->scsw) ==
977                      (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
978                     (!scsw_cc(&irb->scsw)))
979                         /* FIXME: we should restart stlck here, but this
980                          * is extremely unlikely ... */
981                         goto out_wakeup;
982
983                 ccw_device_accumulate_irb(cdev, irb);
984                 /* We don't care about basic sense etc. */
985                 break;
986         default: /* timeout */
987                 break;
988         }
989 out_wakeup:
990         wake_up(&cdev->private->wait_q);
991 }
992
/*
 * (Re-)start device recognition for a disconnected device: enable the
 * subchannel for I/O and kick off sense id, guarded by a 60s timeout.
 */
993 static void
994 ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
995 {
996         struct subchannel *sch;
997
998         sch = to_subchannel(cdev->dev.parent);
999         if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
1000                 /* Couldn't enable the subchannel for i/o. Sick device. */
1001                 return;
1002
1003         /* After 60s the device recognition is considered to have failed. */
1004         ccw_device_set_timeout(cdev, 60*HZ);
1005
1006         cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
1007         ccw_device_sense_id_start(cdev);
1008 }
1009
/*
 * Re-probe a disconnected device: refresh the subchannel information,
 * restore pmcw bits lost while the device was away and restart sense id.
 * If a different devno now answers on this subchannel, move the old
 * ccw device to the orphanage instead.
 */
1010 void ccw_device_trigger_reprobe(struct ccw_device *cdev)
1011 {
1012         struct subchannel *sch;
1013
1014         if (cdev->private->state != DEV_STATE_DISCONNECTED)
1015                 return;
1016
1017         sch = to_subchannel(cdev->dev.parent);
1018         /* Update some values. */
1019         if (stsch(sch->schid, &sch->schib))
1020                 return;
1021         if (!sch->schib.pmcw.dnv)
1022                 return;
1023         /*
1024          * The pim, pam, pom values may not be accurate, but they are the best
1025          * we have before performing device selection :/
1026          */
1027         sch->lpm = sch->schib.pmcw.pam & sch->opm;
1028         /* Re-set some bits in the pmcw that were lost. */
1029         sch->schib.pmcw.csense = 1;
1030         sch->schib.pmcw.ena = 0;
        /* More than one bit set in lpm: enable multipath mode. */
1031         if ((sch->lpm & (sch->lpm - 1)) != 0)
1032                 sch->schib.pmcw.mp = 1;
1033         /* We should also update ssd info, but this has to wait. */
1034         /* Check if this is another device which appeared on the same sch. */
1035         if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
1036                 PREPARE_WORK(&cdev->private->kick_work,
1037                              ccw_device_move_to_orphanage);
1038                 queue_work(slow_path_wq, &cdev->private->kick_work);
1039         } else
1040                 ccw_device_start_id(cdev, 0);
1041 }
1042
/* Interrupt in state OFFLINE: retry disabling the subchannel. */
1043 static void
1044 ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
1045 {
1046         struct subchannel *sch;
1047
1048         sch = to_subchannel(cdev->dev.parent);
1049         /*
1050          * An interrupt in state offline means a previous disable was not
1051          * successful. Try again.
1052          */
1053         cio_disable_subchannel(sch);
1054 }
1055
/*
 * Any event in state CMFCHANGE: retry setting the measurement-related
 * schib bits, go back to ONLINE and re-deliver the event there.
 */
1056 static void
1057 ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
1058 {
1059         retry_set_schib(cdev);
1060         cdev->private->state = DEV_STATE_ONLINE;
1061         dev_fsm_event(cdev, dev_event);
1062 }
1063
/*
 * Any event in state CMFUPDATE: retry copying the measurement block,
 * go back to ONLINE and re-deliver the event there.
 */
1064 static void ccw_device_update_cmfblock(struct ccw_device *cdev,
1065                                        enum dev_event dev_event)
1066 {
1067         cmf_retry_copy_block(cdev);
1068         cdev->private->state = DEV_STATE_ONLINE;
1069         dev_fsm_event(cdev, dev_event);
1070 }
1071
/*
 * Quiesce finished (interrupt or notoper in state QUIESCE): settle the
 * final state and wake up the task waiting for the quiesce.
 */
1072 static void
1073 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1074 {
1075         ccw_device_set_timeout(cdev, 0);
1076         if (dev_event == DEV_EVENT_NOTOPER)
1077                 cdev->private->state = DEV_STATE_NOT_OPER;
1078         else
1079                 cdev->private->state = DEV_STATE_OFFLINE;
1080         wake_up(&cdev->private->wait_q);
1081 }
1082
/*
 * Timeout in state QUIESCE: try to kill the outstanding I/O.  On
 * success or -ENODEV settle the state and wake up the waiter; any
 * other result (e.g. -EBUSY) re-arms a short retry timer.
 */
1083 static void
1084 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1085 {
1086         int ret;
1087
1088         ret = ccw_device_cancel_halt_clear(cdev);
1089         switch (ret) {
1090         case 0:
1091                 cdev->private->state = DEV_STATE_OFFLINE;
1092                 wake_up(&cdev->private->wait_q);
1093                 break;
1094         case -ENODEV:
1095                 cdev->private->state = DEV_STATE_NOT_OPER;
1096                 wake_up(&cdev->private->wait_q);
1097                 break;
1098         default:
                /* Still busy - retry shortly. */
1099                 ccw_device_set_timeout(cdev, HZ/10);
1100         }
1101 }
1102
1103 /*
1104  * No operation action. This is used e.g. to ignore a timeout event in
1105  * state offline.
1106  */
1107 static void
1108 ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
1109 {
        /* Intentionally empty. */
1110 }
1111
1112 /*
1113  * Bug operation action: entered for state/event combinations that must
 * never occur.  Logs the offending state/event pair and crashes.
1114  */
1115 static void
1116 ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
1117 {
1118         CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
1119                       "0.%x.%04x\n", cdev->private->state, dev_event,
1120                       cdev->private->dev_id.ssid,
1121                       cdev->private->dev_id.devno);
1122         BUG();
1123 }
1124
1125 /*
1126  * device statemachine
 *
 * Indexed as dev_jumptable[state][event]; dev_fsm_event() dispatches
 * through this table.  Every state handles all four events.
1127  */
1128 fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1129         [DEV_STATE_NOT_OPER] = {
1130                 [DEV_EVENT_NOTOPER]     = ccw_device_nop,
1131                 [DEV_EVENT_INTERRUPT]   = ccw_device_bug,
1132                 [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
1133                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1134         },
1135         [DEV_STATE_SENSE_PGID] = {
1136                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1137                 [DEV_EVENT_INTERRUPT]   = ccw_device_sense_pgid_irq,
1138                 [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
1139                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1140         },
1141         [DEV_STATE_SENSE_ID] = {
1142                 [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
1143                 [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
1144                 [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
1145                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1146         },
1147         [DEV_STATE_OFFLINE] = {
1148                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1149                 [DEV_EVENT_INTERRUPT]   = ccw_device_offline_irq,
1150                 [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
1151                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1152         },
1153         [DEV_STATE_VERIFY] = {
1154                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1155                 [DEV_EVENT_INTERRUPT]   = ccw_device_verify_irq,
1156                 [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
1157                 [DEV_EVENT_VERIFY]      = ccw_device_delay_verify,
1158         },
1159         [DEV_STATE_ONLINE] = {
1160                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1161                 [DEV_EVENT_INTERRUPT]   = ccw_device_irq,
1162                 [DEV_EVENT_TIMEOUT]     = ccw_device_online_timeout,
1163                 [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
1164         },
1165         [DEV_STATE_W4SENSE] = {
1166                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1167                 [DEV_EVENT_INTERRUPT]   = ccw_device_w4sense,
1168                 [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
1169                 [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
1170         },
1171         [DEV_STATE_DISBAND_PGID] = {
1172                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1173                 [DEV_EVENT_INTERRUPT]   = ccw_device_disband_irq,
1174                 [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
1175                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1176         },
1177         [DEV_STATE_BOXED] = {
1178                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1179                 [DEV_EVENT_INTERRUPT]   = ccw_device_stlck_done,
1180                 [DEV_EVENT_TIMEOUT]     = ccw_device_stlck_done,
1181                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1182         },
1183         /* states to wait for i/o completion before doing something */
1184         [DEV_STATE_CLEAR_VERIFY] = {
1185                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1186                 [DEV_EVENT_INTERRUPT]   = ccw_device_clear_verify,
1187                 [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
1188                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1189         },
1190         [DEV_STATE_TIMEOUT_KILL] = {
1191                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1192                 [DEV_EVENT_INTERRUPT]   = ccw_device_killing_irq,
1193                 [DEV_EVENT_TIMEOUT]     = ccw_device_killing_timeout,
1194                 [DEV_EVENT_VERIFY]      = ccw_device_nop, //FIXME
1195         },
1196         [DEV_STATE_QUIESCE] = {
1197                 [DEV_EVENT_NOTOPER]     = ccw_device_quiesce_done,
1198                 [DEV_EVENT_INTERRUPT]   = ccw_device_quiesce_done,
1199                 [DEV_EVENT_TIMEOUT]     = ccw_device_quiesce_timeout,
1200                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1201         },
1202         /* special states for devices gone not operational */
1203         [DEV_STATE_DISCONNECTED] = {
1204                 [DEV_EVENT_NOTOPER]     = ccw_device_nop,
1205                 [DEV_EVENT_INTERRUPT]   = ccw_device_start_id,
1206                 [DEV_EVENT_TIMEOUT]     = ccw_device_bug,
1207                 [DEV_EVENT_VERIFY]      = ccw_device_start_id,
1208         },
1209         [DEV_STATE_DISCONNECTED_SENSE_ID] = {
1210                 [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
1211                 [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
1212                 [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
1213                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1214         },
1215         [DEV_STATE_CMFCHANGE] = {
1216                 [DEV_EVENT_NOTOPER]     = ccw_device_change_cmfstate,
1217                 [DEV_EVENT_INTERRUPT]   = ccw_device_change_cmfstate,
1218                 [DEV_EVENT_TIMEOUT]     = ccw_device_change_cmfstate,
1219                 [DEV_EVENT_VERIFY]      = ccw_device_change_cmfstate,
1220         },
1221         [DEV_STATE_CMFUPDATE] = {
1222                 [DEV_EVENT_NOTOPER]     = ccw_device_update_cmfblock,
1223                 [DEV_EVENT_INTERRUPT]   = ccw_device_update_cmfblock,
1224                 [DEV_EVENT_TIMEOUT]     = ccw_device_update_cmfblock,
1225                 [DEV_EVENT_VERIFY]      = ccw_device_update_cmfblock,
1226         },
1227 };
1228
/* Exported so ccw device drivers can (re)arm the device timer. */
1229 EXPORT_SYMBOL_GPL(ccw_device_set_timeout);