/*
 * The low performance USB storage driver (ub).
 *
 * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
 * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com)
 *
 * This work is a part of Linux kernel, is derived from it,
 * and is not licensed separately. See file COPYING for details.
 *
 * TODO (sorted by decreasing priority)
 *  -- set readonly flag for CDs, set removable flag for CF readers
 *  -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
 *  -- verify the 13 conditions and do bulk resets
 *  -- highmem
 *  -- move top_sense and work_bcs into separate allocations (if they survive)
 *     for cache purists and esoteric architectures.
 *  -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
 *  -- prune comments, they are too voluminous
 *  -- Resolve XXX's
 *  -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/blkdev.h>
#include <linux/timer.h>
#include <scsi/scsi.h>

#define DRV_NAME "ub"

#define UB_MAJOR 180

/*
 * The command state machine is the key to understanding this driver.
 *
 * The general rule is that all transitions are done towards the bottom
 * of the diagram, thus preventing any loops.
 *
 * An exception to that is how the STAT state is handled. A counter allows it
 * to be re-entered along the path marked with [C].
 *
 *       +--------+
 *       ! INIT   !
 *       +--------+
 *           !
 *        ub_scsi_cmd_start fails ->--------------------------------------\
 *           !                                                            !
 *           V                                                            !
 *       +--------+                                                       !
 *       ! CMD    !                                                       !
 *       +--------+                                                       !
 *           !                                            +--------+      !
 *         was -EPIPE -->-------------------------------->! CLEAR  !      !
 *           !                                            +--------+      !
 *           !                                                !           !
 *         was error -->------------------------------------- ! --------->\
 *           !                                                !           !
 *  /--<-- cmd->dir == NONE ?                                 !           !
 *  !        !                                                !           !
 *  !        V                                                !           !
 *  !    +--------+                                           !           !
 *  !    ! DATA   !                                           !           !
 *  !    +--------+                                           !           !
 *  !        !                           +---------+          !           !
 *  !      was -EPIPE -->--------------->! CLR2STS !          !           !
 *  !        !                           +---------+          !           !
 *  !        !                                !               !           !
 *  !        !                              was error -->---- ! --------->\
 *  !      was error -->--------------------- ! ------------- ! --------->\
 *  !        !                                !               !           !
 *  !        V                                !               !           !
 *  \--->+--------+                           !               !           !
 *       ! STAT   !<--------------------------/               !           !
 *  /--->+--------+                                           !           !
 *  !        !                                                !           !
 * [C]     was -EPIPE -->-----------\                         !           !
 *  !        !                      !                         !           !
 *  +<---- len == 0                 !                         !           !
 *  !        !                      !                         !           !
 *  !      was error -->--------------------------------------!---------->\
 *  !        !                      !                         !           !
 *  +<---- bad CSW                  !                         !           !
 *  +<---- bad tag                  !                         !           !
 *  !        !                      V                         !           !
 *  !        !                 +--------+                     !           !
 *  !        !                 ! CLRRS  !                     !           !
 *  !        !                 +--------+                     !           !
 *  !        !                      !                         !           !
 *  \------- ! --------------------[C]--------\               !           !
 *           !                                !               !           !
 *         cmd->error---\                +--------+           !           !
 *           !          +--------------->! SENSE  !<----------/           !
 *         STAT_FAIL----/                +--------+                       !
 *           !                                !                           V
 *           !                                V                      +--------+
 *           \--------------------------------\--------------------->! DONE   !
 *                                                                   +--------+
 */

/*
 * This many LUNs per USB device.
 * Every one of them takes a host, see UB_MAX_HOSTS.
 */
#define UB_MAX_LUNS   9

/*
 */

#define UB_PARTS_PER_LUN      8

#define UB_MAX_CDB_SIZE      16         /* Corresponds to Bulk */

#define UB_SENSE_SIZE  18

/*
 */

/* command block wrapper */
struct bulk_cb_wrap {
        __le32  Signature;              /* contains 'USBC' */
        u32     Tag;                    /* unique per command id */
        __le32  DataTransferLength;     /* size of data */
        u8      Flags;                  /* direction in bit 0 */
        u8      Lun;                    /* LUN */
        u8      Length;                 /* of the CDB */
        u8      CDB[UB_MAX_CDB_SIZE];   /* max command */
};

#define US_BULK_CB_WRAP_LEN     31
#define US_BULK_CB_SIGN         0x43425355      /* spells out 'USBC' */
#define US_BULK_FLAG_IN         1
#define US_BULK_FLAG_OUT        0

/* command status wrapper */
struct bulk_cs_wrap {
        __le32  Signature;              /* should = 'USBS' */
        u32     Tag;                    /* same as original command */
        __le32  Residue;                /* amount not transferred */
        u8      Status;                 /* see below */
};

#define US_BULK_CS_WRAP_LEN     13
#define US_BULK_CS_SIGN         0x53425355      /* spells out 'USBS' */
#define US_BULK_STAT_OK         0
#define US_BULK_STAT_FAIL       1
#define US_BULK_STAT_PHASE      2

/* bulk-only class specific requests */
#define US_BULK_RESET_REQUEST   0xff
#define US_BULK_GET_MAX_LUN     0xfe

/*
 */
struct ub_dev;

#define UB_MAX_REQ_SG   9       /* cdrecord requires 32KB and maybe a header */
#define UB_MAX_SECTORS 64

/*
 * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
 * even if a webcam hogs the bus, but some devices need time to spin up.
 */
#define UB_URB_TIMEOUT  (HZ*2)
#define UB_DATA_TIMEOUT (HZ*5)  /* ZIP does spin-ups in the data phase */
#define UB_STAT_TIMEOUT (HZ*5)  /* Same spinups and eject for a dataless cmd. */
#define UB_CTRL_TIMEOUT (HZ/2)  /* 500ms ought to be enough to clear a stall */

/*
 * An instance of a SCSI command in transit.
 */
#define UB_DIR_NONE     0
#define UB_DIR_READ     1
#define UB_DIR_ILLEGAL2 2
#define UB_DIR_WRITE    3

#define UB_DIR_CHAR(c)  (((c)==UB_DIR_WRITE)? 'w': \
                         (((c)==UB_DIR_READ)? 'r': 'n'))

enum ub_scsi_cmd_state {
        UB_CMDST_INIT,                  /* Initial state */
        UB_CMDST_CMD,                   /* Command submitted */
        UB_CMDST_DATA,                  /* Data phase */
        UB_CMDST_CLR2STS,               /* Clearing before requesting status */
        UB_CMDST_STAT,                  /* Status phase */
        UB_CMDST_CLEAR,                 /* Clearing a stall (halt, actually) */
        UB_CMDST_CLRRS,                 /* Clearing before retrying status */
        UB_CMDST_SENSE,                 /* Sending Request Sense */
        UB_CMDST_DONE                   /* Final state */
};

struct ub_scsi_cmd {
        unsigned char cdb[UB_MAX_CDB_SIZE];
        unsigned char cdb_len;

        unsigned char dir;              /* 0 - none, 1 - read, 3 - write. */
        enum ub_scsi_cmd_state state;
        unsigned int tag;
        struct ub_scsi_cmd *next;

        int error;                      /* Return code - valid upon done */
        unsigned int act_len;           /* Return size */
        unsigned char key, asc, ascq;   /* May be valid if error==-EIO */

        int stat_count;                 /* Retries getting status. */

        unsigned int len;               /* Requested length */
        unsigned int current_sg;
        unsigned int nsg;               /* sgv[nsg] */
        struct scatterlist sgv[UB_MAX_REQ_SG];

        struct ub_lun *lun;
        void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
        void *back;
};

struct ub_request {
        struct request *rq;
        unsigned int current_try;
        unsigned int nsg;               /* sgv[nsg] */
        struct scatterlist sgv[UB_MAX_REQ_SG];
};

/*
 */
struct ub_capacity {
        unsigned long nsec;             /* Linux size - 512 byte sectors */
        unsigned int bsize;             /* Linux hardsect_size */
        unsigned int bshift;            /* Shift between 512 and hard sects */
};
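
/*
 * A worked example of the bshift math (illustrative, not from the original
 * source): for a CD-ROM with 2048-byte hard sectors, bsize = 2048 and
 * bshift = 2, since 2048 == 512 << 2. A request starting at 512-byte
 * sector 100 then maps to hard sector 100 >> 2 == 25, while nsec still
 * counts 512-byte sectors, so an 8-sector request spans two hard sectors.
 */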

/*
 * This is a direct take-off from linux/include/completion.h
 * The difference is that I do not wait on this thing, just poll.
 * When I want to wait (ub_probe), I just use the stock completion.
 *
 * Note that INIT_COMPLETION takes no lock. It is correct. But why
 * in the bloody hell that thing takes struct instead of pointer to struct
 * is quite beyond me. I just copied it from the stock completion.
 */
struct ub_completion {
        unsigned int done;
        spinlock_t lock;
};

static inline void ub_init_completion(struct ub_completion *x)
{
        x->done = 0;
        spin_lock_init(&x->lock);
}

#define UB_INIT_COMPLETION(x)   ((x).done = 0)

static void ub_complete(struct ub_completion *x)
{
        unsigned long flags;

        spin_lock_irqsave(&x->lock, flags);
        x->done++;
        spin_unlock_irqrestore(&x->lock, flags);
}

static int ub_is_completed(struct ub_completion *x)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&x->lock, flags);
        ret = x->done;
        spin_unlock_irqrestore(&x->lock, flags);
        return ret;
}
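
#if 0
/*
 * Illustrative sketch only, not part of the driver: the intended usage
 * pattern of ub_completion. The URB completion handler (interrupt context)
 * calls ub_complete(); the tasklet never sleeps, it only polls with
 * ub_is_completed() before touching the work URB. Assumes
 * ub_init_completion() was called once at setup time.
 */
static struct ub_completion example_done;

static void example_submit(void)
{
        UB_INIT_COMPLETION(example_done);       /* arm before submitting */
        /* ... usb_submit_urb(...) with a handler that calls ub_complete() */
}

static void example_tasklet_step(void)
{
        if (!ub_is_completed(&example_done))
                return;         /* URB still in flight, poll again later */
        /* safe to inspect the URB's status and actual_length here */
}
#endif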

/*
 */
struct ub_scsi_cmd_queue {
        int qlen, qmax;
        struct ub_scsi_cmd *head, *tail;
};

/*
 * The block device instance (one per LUN).
 */
struct ub_lun {
        struct ub_dev *udev;
        struct list_head link;
        struct gendisk *disk;
        int id;                         /* Host index */
        int num;                        /* LUN number */
        char name[16];

        int changed;                    /* Media was changed */
        int removable;
        int readonly;

        struct ub_request urq;

        /* Use Ingo's mempool if or when we have more than one command. */
        /*
         * Currently we never need more than one command for the whole device.
         * However, giving every LUN a command is a cheap and automatic way
         * to enforce fairness between them.
         */
        int cmda[1];
        struct ub_scsi_cmd cmdv[1];

        struct ub_capacity capacity;
};

/*
 * The USB device instance.
 */
struct ub_dev {
        spinlock_t *lock;
        atomic_t poison;                /* The USB device is disconnected */
        int openc;                      /* protected by ub_lock! */
                                        /* kref is too implicit for our taste */
        int reset;                      /* Reset is running */
        unsigned int tagcnt;
        char name[12];
        struct usb_device *dev;
        struct usb_interface *intf;

        struct list_head luns;

        unsigned int send_bulk_pipe;    /* cached pipe values */
        unsigned int recv_bulk_pipe;
        unsigned int send_ctrl_pipe;
        unsigned int recv_ctrl_pipe;

        struct tasklet_struct tasklet;

        struct ub_scsi_cmd_queue cmd_queue;
        struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */
        unsigned char top_sense[UB_SENSE_SIZE];

        struct ub_completion work_done;
        struct urb work_urb;
        struct timer_list work_timer;
        int last_pipe;                  /* What might need clearing */
        __le32 signature;               /* Learned signature */
        struct bulk_cb_wrap work_bcb;
        struct bulk_cs_wrap work_bcs;
        struct usb_ctrlrequest work_cr;

        struct work_struct reset_work;
        wait_queue_head_t reset_wait;

        int sg_stat[6];
};

/*
 */
static void ub_cleanup(struct ub_dev *sc);
static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_end_rq(struct request *rq, int uptodate);
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd);
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_urb_complete(struct urb *urb, struct pt_regs *pt);
static void ub_scsi_action(unsigned long _dev);
static void ub_scsi_dispatch(struct ub_dev *sc);
static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe);
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
static void ub_reset_enter(struct ub_dev *sc, int try);
static void ub_reset_task(void *arg);
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_capacity *ret);
static int ub_sync_reset(struct ub_dev *sc);
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
static int ub_probe_lun(struct ub_dev *sc, int lnum);

/*
 */
#ifdef CONFIG_USB_LIBUSUAL

#define ub_usb_ids  storage_usb_ids
#else

static struct usb_device_id ub_usb_ids[] = {
        { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
        { }
};

MODULE_DEVICE_TABLE(usb, ub_usb_ids);
#endif /* CONFIG_USB_LIBUSUAL */

/*
 * Find me a way to identify "next free minor" for add_disk(),
 * and the array disappears the next day. However, the number of
 * hosts has something to do with the naming and /proc/partitions.
 * This has to be thought out in detail before changing.
 * If UB_MAX_HOSTS were 1000, we'd use a bitmap. Or a better data structure.
 */
#define UB_MAX_HOSTS  26
static char ub_hostv[UB_MAX_HOSTS];

#define UB_QLOCK_NUM 5
static spinlock_t ub_qlockv[UB_QLOCK_NUM];
static int ub_qlock_next = 0;

static DEFINE_SPINLOCK(ub_lock);        /* Locks globals and ->openc */

/*
 * The id allocator.
 *
 * This also stores the host for indexing by minor, which is somewhat dirty.
 */
static int ub_id_get(void)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ub_lock, flags);
        for (i = 0; i < UB_MAX_HOSTS; i++) {
                if (ub_hostv[i] == 0) {
                        ub_hostv[i] = 1;
                        spin_unlock_irqrestore(&ub_lock, flags);
                        return i;
                }
        }
        spin_unlock_irqrestore(&ub_lock, flags);
        return -1;
}

static void ub_id_put(int id)
{
        unsigned long flags;

        if (id < 0 || id >= UB_MAX_HOSTS) {
                printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
                return;
        }

        spin_lock_irqsave(&ub_lock, flags);
        if (ub_hostv[id] == 0) {
                spin_unlock_irqrestore(&ub_lock, flags);
                printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
                return;
        }
        ub_hostv[id] = 0;
        spin_unlock_irqrestore(&ub_lock, flags);
}

/*
 * This is necessitated by the fact that blk_cleanup_queue does not
 * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
 * Since our blk_init_queue() passes a spinlock common with ub_dev,
 * we have lifetime issues when ub_cleanup frees ub_dev.
 */
static spinlock_t *ub_next_lock(void)
{
        unsigned long flags;
        spinlock_t *ret;

        spin_lock_irqsave(&ub_lock, flags);
        ret = &ub_qlockv[ub_qlock_next];
        ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
        spin_unlock_irqrestore(&ub_lock, flags);
        return ret;
}

/*
 * Downcount for deallocation. This rides on two assumptions:
 *  - once something is poisoned, its refcount cannot grow
 *  - opens cannot happen at this time (del_gendisk was done)
 * If the above is true, we can drop the lock, which we need for
 * blk_cleanup_queue(): the silly thing may attempt to sleep.
 * [Actually, it never needs to sleep for us, but it calls might_sleep()]
 */
static void ub_put(struct ub_dev *sc)
{
        unsigned long flags;

        spin_lock_irqsave(&ub_lock, flags);
        --sc->openc;
        if (sc->openc == 0 && atomic_read(&sc->poison)) {
                spin_unlock_irqrestore(&ub_lock, flags);
                ub_cleanup(sc);
        } else {
                spin_unlock_irqrestore(&ub_lock, flags);
        }
}

/*
 * Final cleanup and deallocation.
 */
static void ub_cleanup(struct ub_dev *sc)
{
        struct list_head *p;
        struct ub_lun *lun;
        request_queue_t *q;

        while (!list_empty(&sc->luns)) {
                p = sc->luns.next;
                lun = list_entry(p, struct ub_lun, link);
                list_del(p);

                /* I don't think queue can be NULL. But... Stolen from sx8.c */
                if ((q = lun->disk->queue) != NULL)
                        blk_cleanup_queue(q);
                /*
                 * If we zero disk->private_data BEFORE put_disk, we have
                 * to check for NULL all over the place in open, release,
                 * check_media and revalidate, because the block level
                 * semaphore is well inside the put_disk.
                 * But we cannot zero after the call, because *disk is gone.
                 * The sd.c is blatantly racy in this area.
                 */
                /* disk->private_data = NULL; */
                put_disk(lun->disk);
                lun->disk = NULL;

                ub_id_put(lun->id);
                kfree(lun);
        }

        usb_set_intfdata(sc->intf, NULL);
        usb_put_intf(sc->intf);
        usb_put_dev(sc->dev);
        kfree(sc);
}

/*
 * The "command allocator".
 */
static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
{
        struct ub_scsi_cmd *ret;

        if (lun->cmda[0])
                return NULL;
        ret = &lun->cmdv[0];
        lun->cmda[0] = 1;
        return ret;
}

static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
{
        if (cmd != &lun->cmdv[0]) {
                printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
                    lun->name, cmd);
                return;
        }
        if (!lun->cmda[0]) {
                printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
                return;
        }
        lun->cmda[0] = 0;
}

/*
 * The command queue.
 */
static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

        if (t->qlen++ == 0) {
                t->head = cmd;
                t->tail = cmd;
        } else {
                t->tail->next = cmd;
                t->tail = cmd;
        }

        if (t->qlen > t->qmax)
                t->qmax = t->qlen;
}

static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

        if (t->qlen++ == 0) {
                t->head = cmd;
                t->tail = cmd;
        } else {
                cmd->next = t->head;
                t->head = cmd;
        }

        if (t->qlen > t->qmax)
                t->qmax = t->qlen;
}

static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
{
        struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
        struct ub_scsi_cmd *cmd;

        if (t->qlen == 0)
                return NULL;
        if (--t->qlen == 0)
                t->tail = NULL;
        cmd = t->head;
        t->head = cmd->next;
        cmd->next = NULL;
        return cmd;
}

#define ub_cmdq_peek(sc)  ((sc)->cmd_queue.head)
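
/*
 * A note on queue discipline (illustrative, not from the original source):
 * regular commands go to the tail with ub_cmdq_add(), while the auto-sense
 * command is pushed to the head with ub_cmdq_insert(), so REQUEST SENSE
 * runs before anything queued behind the failed command:
 */
#if 0 /* Sketch only; "data_cmd" and "sense_cmd" are hypothetical names. */
        ub_cmdq_add(sc, data_cmd);      /* FIFO order for normal I/O */
        ub_cmdq_insert(sc, sense_cmd);  /* LIFO push: sense goes first */
        BUG_ON(ub_cmdq_peek(sc) != sense_cmd);
#endif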

/*
 * The request function is our main entry point.
 */

static void ub_request_fn(request_queue_t *q)
{
        struct ub_lun *lun = q->queuedata;
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                if (ub_request_fn_1(lun, rq) != 0) {
                        blk_stop_queue(q);
                        break;
                }
        }
}

static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
{
        struct ub_dev *sc = lun->udev;
        struct ub_scsi_cmd *cmd;
        struct ub_request *urq;
        int n_elem;

        if (atomic_read(&sc->poison) || lun->changed) {
                blkdev_dequeue_request(rq);
                ub_end_rq(rq, 0);
                return 0;
        }

        if (lun->urq.rq != NULL)
                return -1;
        if ((cmd = ub_get_cmd(lun)) == NULL)
                return -1;
        memset(cmd, 0, sizeof(struct ub_scsi_cmd));

        blkdev_dequeue_request(rq);

        urq = &lun->urq;
        memset(urq, 0, sizeof(struct ub_request));
        urq->rq = rq;

        /*
         * get scatterlist from block layer
         */
        n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
        if (n_elem < 0) {
                /* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
                printk(KERN_INFO "%s: failed request map (%d)\n",
                    lun->name, n_elem);
                goto drop;
        }
        if (n_elem > UB_MAX_REQ_SG) {   /* Paranoia */
                printk(KERN_WARNING "%s: request with %d segments\n",
                    lun->name, n_elem);
                goto drop;
        }
        urq->nsg = n_elem;
        sc->sg_stat[n_elem < 5 ? n_elem : 5]++;

        if (blk_pc_request(rq)) {
                ub_cmd_build_packet(sc, lun, cmd, urq);
        } else {
                ub_cmd_build_block(sc, lun, cmd, urq);
        }
        cmd->state = UB_CMDST_INIT;
        cmd->lun = lun;
        cmd->done = ub_rw_cmd_done;
        cmd->back = urq;

        cmd->tag = sc->tagcnt++;
        if (ub_submit_scsi(sc, cmd) != 0)
                goto drop;

        return 0;

drop:
        ub_put_cmd(lun, cmd);
        ub_end_rq(rq, 0);
        return 0;
}

static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
        struct request *rq = urq->rq;
        unsigned int block, nblks;

        if (rq_data_dir(rq) == WRITE)
                cmd->dir = UB_DIR_WRITE;
        else
                cmd->dir = UB_DIR_READ;

        cmd->nsg = urq->nsg;
        memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

        /*
         * build the command
         *
         * The call to blk_queue_hardsect_size() guarantees that request
         * is aligned, but it is given in terms of 512 byte units, always.
         */
        block = rq->sector >> lun->capacity.bshift;
        nblks = rq->nr_sectors >> lun->capacity.bshift;

        cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
        /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
        cmd->cdb[2] = block >> 24;
        cmd->cdb[3] = block >> 16;
        cmd->cdb[4] = block >> 8;
        cmd->cdb[5] = block;
        cmd->cdb[7] = nblks >> 8;
        cmd->cdb[8] = nblks;
        cmd->cdb_len = 10;

        cmd->len = rq->nr_sectors * 512;
}
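
/*
 * A worked example (illustrative, not from the original source): a 4 KB
 * read at 512-byte sector 2048 on a LUN with 512-byte hard sectors
 * (bshift == 0) yields block = 2048 = 0x800 and nblks = 8, so the CDB is
 *
 *   28 00 00 00 08 00 00 00 08 00
 *
 * i.e. READ_10 (0x28), big-endian LBA 0x00000800 in bytes 2..5, and
 * big-endian transfer length 0x0008 in bytes 7..8.
 */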

static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
        struct request *rq = urq->rq;

        if (rq->data_len == 0) {
                cmd->dir = UB_DIR_NONE;
        } else {
                if (rq_data_dir(rq) == WRITE)
                        cmd->dir = UB_DIR_WRITE;
                else
                        cmd->dir = UB_DIR_READ;
        }

        cmd->nsg = urq->nsg;
        memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

        memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
        cmd->cdb_len = rq->cmd_len;

        cmd->len = rq->data_len;
}

static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_lun *lun = cmd->lun;
        struct ub_request *urq = cmd->back;
        struct request *rq;
        int uptodate;

        rq = urq->rq;

        if (cmd->error == 0) {
                uptodate = 1;

                if (blk_pc_request(rq)) {
                        if (cmd->act_len >= rq->data_len)
                                rq->data_len = 0;
                        else
                                rq->data_len -= cmd->act_len;
                }
        } else {
                uptodate = 0;

                if (blk_pc_request(rq)) {
                        /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
                        memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
                        rq->sense_len = UB_SENSE_SIZE;
                        if (sc->top_sense[0] != 0)
                                rq->errors = SAM_STAT_CHECK_CONDITION;
                        else
                                rq->errors = DID_ERROR << 16;
                } else {
                        if (cmd->error == -EIO) {
                                if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
                                        return;
                        }
                }
        }

        urq->rq = NULL;

        ub_put_cmd(lun, cmd);
        ub_end_rq(rq, uptodate);
        blk_start_queue(lun->disk->queue);
}

static void ub_end_rq(struct request *rq, int uptodate)
{
        end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
        end_that_request_last(rq, uptodate);
}

static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd)
{

        if (atomic_read(&sc->poison))
                return -ENXIO;

        ub_reset_enter(sc, urq->current_try);

        if (urq->current_try >= 3)
                return -EIO;
        urq->current_try++;

        /* Remove this if anyone complains of flooding. */
        printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
            "[sense %x %02x %02x] retry %d\n",
            sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
            cmd->key, cmd->asc, cmd->ascq, urq->current_try);

        memset(cmd, 0, sizeof(struct ub_scsi_cmd));
        ub_cmd_build_block(sc, lun, cmd, urq);

        cmd->state = UB_CMDST_INIT;
        cmd->lun = lun;
        cmd->done = ub_rw_cmd_done;
        cmd->back = urq;

        cmd->tag = sc->tagcnt++;

#if 0 /* Wasteful */
        return ub_submit_scsi(sc, cmd);
#else
        ub_cmdq_add(sc, cmd);
        return 0;
#endif
}

/*
 * Submit a regular SCSI operation (not an auto-sense).
 *
 * The Iron Law of Good Submit Routine is:
 * Zero return - callback is done, Nonzero return - callback is not done.
 * No exceptions.
 *
 * Host is assumed locked.
 */
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

        if (cmd->state != UB_CMDST_INIT ||
            (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
                return -EINVAL;
        }

        ub_cmdq_add(sc, cmd);
        /*
         * We can call ub_scsi_dispatch(sc) right away here, but it's a little
         * safer to jump to a tasklet, in case upper layers do something silly.
         */
        tasklet_schedule(&sc->tasklet);
        return 0;
}
872 /*
873  * Submit the first URB for the queued command.
874  * This function does not deal with queueing in any way.
875  */
876 static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
877 {
878         struct bulk_cb_wrap *bcb;
879         int rc;
880
881         bcb = &sc->work_bcb;
882
883         /*
884          * ``If the allocation length is eighteen or greater, and a device
885          * server returns less than eithteen bytes of data, the application
886          * client should assume that the bytes not transferred would have been
887          * zeroes had the device server returned those bytes.''
888          *
889          * We zero sense for all commands so that when a packet request
890          * fails it does not return a stale sense.
891          */
892         memset(&sc->top_sense, 0, UB_SENSE_SIZE);
893
894         /* set up the command wrapper */
895         bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
896         bcb->Tag = cmd->tag;            /* Endianness is not important */
897         bcb->DataTransferLength = cpu_to_le32(cmd->len);
898         bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
899         bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
900         bcb->Length = cmd->cdb_len;
901
902         /* copy the command payload */
903         memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);
904
905         UB_INIT_COMPLETION(sc->work_done);
906
907         sc->last_pipe = sc->send_bulk_pipe;
908         usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
909             bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
910
911         /* Fill what we shouldn't be filling, because usb-storage did so. */
912         sc->work_urb.actual_length = 0;
913         sc->work_urb.error_count = 0;
914         sc->work_urb.status = 0;
915
916         if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
917                 /* XXX Clear stalls */
918                 ub_complete(&sc->work_done);
919                 return rc;
920         }
921
922         sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
923         add_timer(&sc->work_timer);
924
925         cmd->state = UB_CMDST_CMD;
926         return 0;
927 }
928
929 /*
930  * Timeout handler.
931  */
932 static void ub_urb_timeout(unsigned long arg)
933 {
934         struct ub_dev *sc = (struct ub_dev *) arg;
935         unsigned long flags;
936
937         spin_lock_irqsave(sc->lock, flags);
938         if (!ub_is_completed(&sc->work_done))
939                 usb_unlink_urb(&sc->work_urb);
940         spin_unlock_irqrestore(sc->lock, flags);
941 }
942
943 /*
944  * Completion routine for the work URB.
945  *
946  * This can be called directly from usb_submit_urb (while we have
947  * the sc->lock taken) and from an interrupt (while we do NOT have
948  * the sc->lock taken). Therefore, bounce this off to a tasklet.
949  */
950 static void ub_urb_complete(struct urb *urb, struct pt_regs *pt)
951 {
952         struct ub_dev *sc = urb->context;
953
954         ub_complete(&sc->work_done);
955         tasklet_schedule(&sc->tasklet);
956 }
957
958 static void ub_scsi_action(unsigned long _dev)
959 {
960         struct ub_dev *sc = (struct ub_dev *) _dev;
961         unsigned long flags;
962
963         spin_lock_irqsave(sc->lock, flags);
964         ub_scsi_dispatch(sc);
965         spin_unlock_irqrestore(sc->lock, flags);
966 }
967
968 static void ub_scsi_dispatch(struct ub_dev *sc)
969 {
970         struct ub_scsi_cmd *cmd;
971         int rc;
972
973         while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
974                 if (cmd->state == UB_CMDST_DONE) {
975                         ub_cmdq_pop(sc);
976                         (*cmd->done)(sc, cmd);
977                 } else if (cmd->state == UB_CMDST_INIT) {
978                         if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
979                                 break;
980                         cmd->error = rc;
981                         cmd->state = UB_CMDST_DONE;
982                 } else {
983                         if (!ub_is_completed(&sc->work_done))
984                                 break;
985                         del_timer(&sc->work_timer);
986                         ub_scsi_urb_compl(sc, cmd);
987                 }
988         }
989 }
990
static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct urb *urb = &sc->work_urb;
        struct bulk_cs_wrap *bcs;
        int len;
        int rc;

        if (atomic_read(&sc->poison)) {
                ub_state_done(sc, cmd, -ENODEV);
                return;
        }

        if (cmd->state == UB_CMDST_CLEAR) {
                if (urb->status == -EPIPE) {
                        /*
                         * STALL while clearing STALL.
                         * The control pipe clears itself - nothing to do.
                         */
                        printk(KERN_NOTICE "%s: stall on control pipe\n",
                            sc->name);
                        goto Bad_End;
                }

                /*
                 * We ignore the result for the halt clear.
                 */

                /* reset the endpoint toggle */
                usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
                        usb_pipeout(sc->last_pipe), 0);

                ub_state_sense(sc, cmd);

        } else if (cmd->state == UB_CMDST_CLR2STS) {
                if (urb->status == -EPIPE) {
                        printk(KERN_NOTICE "%s: stall on control pipe\n",
                            sc->name);
                        goto Bad_End;
                }

                /*
                 * We ignore the result for the halt clear.
                 */

                /* reset the endpoint toggle */
                usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
                        usb_pipeout(sc->last_pipe), 0);

                ub_state_stat(sc, cmd);

        } else if (cmd->state == UB_CMDST_CLRRS) {
                if (urb->status == -EPIPE) {
                        printk(KERN_NOTICE "%s: stall on control pipe\n",
                            sc->name);
                        goto Bad_End;
                }

                /*
                 * We ignore the result for the halt clear.
                 */

                /* reset the endpoint toggle */
                usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
                        usb_pipeout(sc->last_pipe), 0);

                ub_state_stat_counted(sc, cmd);

        } else if (cmd->state == UB_CMDST_CMD) {
                switch (urb->status) {
                case 0:
                        break;
                case -EOVERFLOW:
                        goto Bad_End;
                case -EPIPE:
                        rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                        if (rc != 0) {
                                printk(KERN_NOTICE "%s: "
                                    "unable to submit clear (%d)\n",
                                    sc->name, rc);
                                /*
                                 * This is typically ENOMEM or some other such shit.
                                 * Retrying is pointless. Just do Bad End on it...
                                 */
                                ub_state_done(sc, cmd, rc);
                                return;
                        }
                        cmd->state = UB_CMDST_CLEAR;
                        return;
                case -ESHUTDOWN:        /* unplug */
                case -EILSEQ:           /* unplug timeout on uhci */
                        ub_state_done(sc, cmd, -ENODEV);
                        return;
                default:
                        goto Bad_End;
                }
                if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
                        goto Bad_End;
                }

                if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
                        ub_state_stat(sc, cmd);
                        return;
                }

                // udelay(125);         // usb-storage has this
                ub_data_start(sc, cmd);

        } else if (cmd->state == UB_CMDST_DATA) {
                if (urb->status == -EPIPE) {
                        rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                        if (rc != 0) {
                                printk(KERN_NOTICE "%s: "
                                    "unable to submit clear (%d)\n",
                                    sc->name, rc);
                                ub_state_done(sc, cmd, rc);
                                return;
                        }
                        cmd->state = UB_CMDST_CLR2STS;
                        return;
                }
                if (urb->status == -EOVERFLOW) {
                        /*
                         * A babble? Failure, but we must transfer CSW now.
                         */
                        cmd->error = -EOVERFLOW;        /* A cheap trick... */
                        ub_state_stat(sc, cmd);
                        return;
                }

                if (cmd->dir == UB_DIR_WRITE) {
                        /*
                         * Do not continue writes in case of a failure.
                         * Doing so would cause sectors to be mixed up,
                         * which is worse than sectors lost.
                         *
                         * We must try to read the CSW, or many devices
                         * get confused.
                         */
                        len = urb->actual_length;
                        if (urb->status != 0 ||
                            len != cmd->sgv[cmd->current_sg].length) {
                                cmd->act_len += len;

                                cmd->error = -EIO;
                                ub_state_stat(sc, cmd);
                                return;
                        }

                } else {
                        /*
                         * If an error occurs on read, we record it, and
                         * continue to fetch data in order to avoid bubbles.
                         *
                         * As a small shortcut, we stop if we detect that
                         * a CSW is mixed into the data.
                         */
                        if (urb->status != 0)
                                cmd->error = -EIO;

                        len = urb->actual_length;
                        if (urb->status != 0 ||
                            len != cmd->sgv[cmd->current_sg].length) {
                                if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
                                        goto Bad_End;
                        }
                }

                cmd->act_len += urb->actual_length;

                if (++cmd->current_sg < cmd->nsg) {
                        ub_data_start(sc, cmd);
                        return;
                }
                ub_state_stat(sc, cmd);

        } else if (cmd->state == UB_CMDST_STAT) {
                if (urb->status == -EPIPE) {
                        rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                        if (rc != 0) {
                                printk(KERN_NOTICE "%s: "
                                    "unable to submit clear (%d)\n",
                                    sc->name, rc);
                                ub_state_done(sc, cmd, rc);
                                return;
                        }

                        /*
                         * Having a stall when getting CSW is an error, so
                         * make sure upper levels are not oblivious to it.
                         */
                        cmd->error = -EIO;              /* A cheap trick... */

                        cmd->state = UB_CMDST_CLRRS;
                        return;
                }

                /* Catch everything, including -EOVERFLOW and other nasties. */
                if (urb->status != 0)
                        goto Bad_End;

                if (urb->actual_length == 0) {
                        ub_state_stat_counted(sc, cmd);
                        return;
                }

                /*
                 * Check the returned Bulk protocol status.
                 * The status block has to be validated first.
                 */

                bcs = &sc->work_bcs;

                if (sc->signature == cpu_to_le32(0)) {
                        /*
                         * This is the first reply, so do not perform the check.
                         * Instead, remember the signature the device uses
                         * for future checks. But do not allow a nul.
                         */
                        sc->signature = bcs->Signature;
                        if (sc->signature == cpu_to_le32(0)) {
                                ub_state_stat_counted(sc, cmd);
                                return;
                        }
                } else {
                        if (bcs->Signature != sc->signature) {
                                ub_state_stat_counted(sc, cmd);
                                return;
                        }
                }

                if (bcs->Tag != cmd->tag) {
                        /*
                         * This usually happens when we disagree with the
                         * device's microcode about something. For instance,
                         * a few of them throw this after timeouts. They buffer
                         * commands and reply at commands we timed out before.
                         * Without flushing these replies we loop forever.
                         */
                        ub_state_stat_counted(sc, cmd);
                        return;
                }

                len = le32_to_cpu(bcs->Residue);
                if (len != cmd->len - cmd->act_len) {
                        /*
                         * It is all right to transfer less, the caller has
                         * to check. But it's not all right if the device
                         * counts disagree with our counts.
                         */
                        goto Bad_End;
                }

                switch (bcs->Status) {
                case US_BULK_STAT_OK:
                        break;
                case US_BULK_STAT_FAIL:
                        ub_state_sense(sc, cmd);
                        return;
                case US_BULK_STAT_PHASE:
                        goto Bad_End;
                default:
                        printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
                            sc->name, bcs->Status);
                        ub_state_done(sc, cmd, -EINVAL);
                        return;
                }

                /* Not zeroing error to preserve a babble indicator */
                if (cmd->error != 0) {
                        ub_state_sense(sc, cmd);
                        return;
                }
                cmd->state = UB_CMDST_DONE;
                ub_cmdq_pop(sc);
                (*cmd->done)(sc, cmd);

        } else if (cmd->state == UB_CMDST_SENSE) {
                ub_state_done(sc, cmd, -EIO);

        } else {
                printk(KERN_WARNING "%s: "
                    "wrong command state %d\n",
                    sc->name, cmd->state);
                ub_state_done(sc, cmd, -EINVAL);
                return;
        }
        return;

Bad_End: /* Little Excel is dead */
        ub_state_done(sc, cmd, -EIO);
}

/*
 * Factorization helper for the command state machine:
 * Initiate a data segment transfer.
 */
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
        int pipe;
        int rc;

        UB_INIT_COMPLETION(sc->work_done);

        if (cmd->dir == UB_DIR_READ)
                pipe = sc->recv_bulk_pipe;
        else
                pipe = sc->send_bulk_pipe;
        sc->last_pipe = pipe;
        usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
            page_address(sg->page) + sg->offset, sg->length,
            ub_urb_complete, sc);
        sc->work_urb.actual_length = 0;
        sc->work_urb.error_count = 0;
        sc->work_urb.status = 0;

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                /* XXX Clear stalls */
                ub_complete(&sc->work_done);
                ub_state_done(sc, cmd, rc);
                return;
        }

        sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
        add_timer(&sc->work_timer);

        cmd->state = UB_CMDST_DATA;
}

/*
 * Factorization helper for the command state machine:
 * Finish the command.
 */
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
{

        cmd->error = rc;
        cmd->state = UB_CMDST_DONE;
        ub_cmdq_pop(sc);
        (*cmd->done)(sc, cmd);
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read.
 */
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        int rc;

        UB_INIT_COMPLETION(sc->work_done);

        sc->last_pipe = sc->recv_bulk_pipe;
        usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
            &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
        sc->work_urb.actual_length = 0;
        sc->work_urb.error_count = 0;
        sc->work_urb.status = 0;

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                /* XXX Clear stalls */
                ub_complete(&sc->work_done);
                ub_state_done(sc, cmd, rc);
                return -1;
        }

        sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
        add_timer(&sc->work_timer);
        return 0;
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read and go to STAT state.
 */
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

        if (__ub_state_stat(sc, cmd) != 0)
                return;

        cmd->stat_count = 0;
        cmd->state = UB_CMDST_STAT;
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read and go to STAT state with counter (along [C] path).
 */
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

        if (++cmd->stat_count >= 4) {
                ub_state_sense(sc, cmd);
                return;
        }

        if (__ub_state_stat(sc, cmd) != 0)
                return;

        cmd->state = UB_CMDST_STAT;
}

/*
 * Factorization helper for the command state machine:
 * Submit a REQUEST SENSE and go to SENSE state.
 */
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_scsi_cmd *scmd;
        struct scatterlist *sg;
        int rc;

        if (cmd->cdb[0] == REQUEST_SENSE) {
                rc = -EPIPE;
                goto error;
        }

        scmd = &sc->top_rqs_cmd;
        memset(scmd, 0, sizeof(struct ub_scsi_cmd));
        scmd->cdb[0] = REQUEST_SENSE;
        scmd->cdb[4] = UB_SENSE_SIZE;
        scmd->cdb_len = 6;
        scmd->dir = UB_DIR_READ;
        scmd->state = UB_CMDST_INIT;
        scmd->nsg = 1;
        sg = &scmd->sgv[0];
        sg->page = virt_to_page(sc->top_sense);
        sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1);
        sg->length = UB_SENSE_SIZE;
        scmd->len = UB_SENSE_SIZE;
        scmd->lun = cmd->lun;
        scmd->done = ub_top_sense_done;
        scmd->back = cmd;

        scmd->tag = sc->tagcnt++;

        cmd->state = UB_CMDST_SENSE;

        ub_cmdq_insert(sc, scmd);
        return;

error:
        ub_state_done(sc, cmd, rc);
}

/*
 * A helper for the command's state machine:
 * Submit a stall clear.
 */
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe)
{
        int endp;
        struct usb_ctrlrequest *cr;
        int rc;

        endp = usb_pipeendpoint(stalled_pipe);
        if (usb_pipein(stalled_pipe))
                endp |= USB_DIR_IN;

        cr = &sc->work_cr;
        cr->bRequestType = USB_RECIP_ENDPOINT;
        cr->bRequest = USB_REQ_CLEAR_FEATURE;
        cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
        cr->wIndex = cpu_to_le16(endp);
        cr->wLength = cpu_to_le16(0);

        UB_INIT_COMPLETION(sc->work_done);

        usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
            (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
        sc->work_urb.actual_length = 0;
        sc->work_urb.error_count = 0;
        sc->work_urb.status = 0;

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                ub_complete(&sc->work_done);
                return rc;
        }

        sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
        add_timer(&sc->work_timer);
        return 0;
}
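
/*
 * A worked example of the wIndex encoding above (illustrative, not from
 * the original source): clearing a halt on bulk-in endpoint 2 sends the
 * standard CLEAR_FEATURE(ENDPOINT_HALT) request with wIndex =
 * 0x02 | USB_DIR_IN = 0x82, while the same endpoint number in the OUT
 * direction would use wIndex = 0x02.
 */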
1476
1477 /*
1478  */
1479 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
1480 {
1481         unsigned char *sense = sc->top_sense;
1482         struct ub_scsi_cmd *cmd;
1483
1484         /*
1485          * Find the command which triggered the unit attention or a check,
1486          * save the sense into it, and advance its state machine.
1487          */
1488         if ((cmd = ub_cmdq_peek(sc)) == NULL) {
1489                 printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
1490                 return;
1491         }
1492         if (cmd != scmd->back) {
1493                 printk(KERN_WARNING "%s: "
1494                     "sense done for wrong command 0x%x\n",
1495                     sc->name, cmd->tag);
1496                 return;
1497         }
1498         if (cmd->state != UB_CMDST_SENSE) {
1499                 printk(KERN_WARNING "%s: "
1500                     "sense done with bad cmd state %d\n",
1501                     sc->name, cmd->state);
1502                 return;
1503         }
1504
1505         /*
1506          * Ignoring scmd->act_len, because the buffer was pre-zeroed.
1507          */
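             /*
              * For reference, in fixed-format sense data (SPC) bits 3:0 of
              * byte 2 carry the sense key, byte 12 the additional sense
              * code (ASC), and byte 13 its qualifier (ASCQ); hence the
              * three loads below.
              */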
1508         cmd->key = sense[2] & 0x0F;
1509         cmd->asc = sense[12];
1510         cmd->ascq = sense[13];
1511
1512         ub_scsi_urb_compl(sc, cmd);
1513 }
1514
1515 /*
1516  * Reset management
1517  * XXX Move usb_reset_device to khubd. Hogging kevent is not a good thing.
1518  * XXX Make usb_sync_reset asynchronous.
1519  */
1520
1521 static void ub_reset_enter(struct ub_dev *sc, int try)
1522 {
1523
1524         if (sc->reset) {
1525                 /* This happens often on multi-LUN devices. */
1526                 return;
1527         }
1528         sc->reset = try + 1;
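             /*
              * A note on the encoding, as consumed by ub_reset_task below:
              * the +1 keeps sc->reset non-zero while a reset is pending,
              * and the low bit selects the method: an even value requests
              * a Bulk-Only reset, an odd one a full usb_reset_device.
              */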
1529
1530 #if 0 /* Not needed because the disconnect waits for us. */
1531         unsigned long flags;
1532         spin_lock_irqsave(&ub_lock, flags);
1533         sc->openc++;
1534         spin_unlock_irqrestore(&ub_lock, flags);
1535 #endif
1536
1537 #if 0 /* We let them stop themselves. */
1538         struct list_head *p;
1539         struct ub_lun *lun;
1540         list_for_each(p, &sc->luns) {
1541                 lun = list_entry(p, struct ub_lun, link);
1542                 blk_stop_queue(lun->disk->queue);
1543         }
1544 #endif
1545
1546         schedule_work(&sc->reset_work);
1547 }
1548
1549 static void ub_reset_task(void *arg)
1550 {
1551         struct ub_dev *sc = arg;
1552         unsigned long flags;
1553         struct list_head *p;
1554         struct ub_lun *lun;
1555         int lkr, rc;
1556
1557         if (!sc->reset) {
1558                 printk(KERN_WARNING "%s: Running reset without a request\n",
1559                     sc->name);
1560                 return;
1561         }
1562
1563         if (atomic_read(&sc->poison)) {
1564                 ;
1565         } else if ((sc->reset & 1) == 0) {
1566                 ub_sync_reset(sc);
1567                 msleep(700);    /* usb-storage sleeps 6s (!) */
1568                 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
1569                 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
1570         } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
1571                 ;
1572         } else {
1573                 if ((lkr = usb_lock_device_for_reset(sc->dev, sc->intf)) < 0) {
1574                         printk(KERN_NOTICE
1575                             "%s: usb_lock_device_for_reset failed (%d)\n",
1576                             sc->name, lkr);
1577                 } else {
1578                         rc = usb_reset_device(sc->dev);
1579                         if (rc < 0) {
1580                                 printk(KERN_NOTICE "%s: "
1581                                     "usb_reset_device failed (%d)\n",
1582                                     sc->name, rc);
1583                         }
1584
1585                         if (lkr)
1586                                 usb_unlock_device(sc->dev);
1587                 }
1588         }
1589
1590         /*
1591          * In theory, no commands can be running while reset is active,
1592          * so nobody can ask for another reset, and so we do not need any
1593          * queues of resets or anything. We do need a spinlock though,
1594          * to interact with block layer.
1595          */
1596         spin_lock_irqsave(sc->lock, flags);
1597         sc->reset = 0;
1598         tasklet_schedule(&sc->tasklet);
1599         list_for_each(p, &sc->luns) {
1600                 lun = list_entry(p, struct ub_lun, link);
1601                 blk_start_queue(lun->disk->queue);
1602         }
1603         wake_up(&sc->reset_wait);
1604         spin_unlock_irqrestore(sc->lock, flags);
1605 }
1606
1607 /*
1608  * This is called from a process context.
1609  */
1610 static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
1611 {
1612
1613         lun->readonly = 0;      /* XXX Query this from the device */
1614
1615         lun->capacity.nsec = 0;
1616         lun->capacity.bsize = 512;
1617         lun->capacity.bshift = 0;
1618
1619         if (ub_sync_tur(sc, lun) != 0)
1620                 return;                 /* Not ready */
1621         lun->changed = 0;
1622
1623         if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1624                 /*
1625                  * The retry here means something is wrong, either with the
1626                  * device, with the transport, or with our code.
1627                  * We keep this because sd.c has retries for capacity.
1628                  */
1629                 if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1630                         lun->capacity.nsec = 0;
1631                         lun->capacity.bsize = 512;
1632                         lun->capacity.bshift = 0;
1633                 }
1634         }
1635 }
1636
1637 /*
1638  * The open function.
1639  * This is mostly needed to keep refcounting, but also to support
1640  * media checks on removable media drives.
1641  */
1642 static int ub_bd_open(struct inode *inode, struct file *filp)
1643 {
1644         struct gendisk *disk = inode->i_bdev->bd_disk;
1645         struct ub_lun *lun = disk->private_data;
1646         struct ub_dev *sc = lun->udev;
1647         unsigned long flags;
1648         int rc;
1649
1650         spin_lock_irqsave(&ub_lock, flags);
1651         if (atomic_read(&sc->poison)) {
1652                 spin_unlock_irqrestore(&ub_lock, flags);
1653                 return -ENXIO;
1654         }
1655         sc->openc++;
1656         spin_unlock_irqrestore(&ub_lock, flags);
1657
1658         if (lun->removable || lun->readonly)
1659                 check_disk_change(inode->i_bdev);
1660
1661         /*
1662          * sd.c considers ->media_present and ->changed non-equivalent
1663          * under some pretty murky conditions (a failure of READ CAPACITY).
1664          * We may need that distinction one day.
1665          */
1666         if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) {
1667                 rc = -ENOMEDIUM;
1668                 goto err_open;
1669         }
1670
1671         if (lun->readonly && (filp->f_mode & FMODE_WRITE)) {
1672                 rc = -EROFS;
1673                 goto err_open;
1674         }
1675
1676         return 0;
1677
1678 err_open:
1679         ub_put(sc);
1680         return rc;
1681 }
1682
1683 /*
1684  */
1685 static int ub_bd_release(struct inode *inode, struct file *filp)
1686 {
1687         struct gendisk *disk = inode->i_bdev->bd_disk;
1688         struct ub_lun *lun = disk->private_data;
1689         struct ub_dev *sc = lun->udev;
1690
1691         ub_put(sc);
1692         return 0;
1693 }
1694
1695 /*
1696  * The ioctl interface.
1697  */
1698 static int ub_bd_ioctl(struct inode *inode, struct file *filp,
1699     unsigned int cmd, unsigned long arg)
1700 {
1701         struct gendisk *disk = inode->i_bdev->bd_disk;
1702         void __user *usermem = (void __user *) arg;
1703
1704         return scsi_cmd_ioctl(filp, disk, cmd, usermem);
1705 }
1706
1707 /*
1708  * This is called once a new disk is seen by the block layer or by ub_probe().
1709  * The main objective here is to discover the features of the media, such as
1710  * the capacity, read-only status, etc. USB storage generally does not
1711  * need to be spun up, but if we needed it, this would be the place.
1712  *
1713  * This call can sleep.
1714  *
1715  * The return code is not used.
1716  */
1717 static int ub_bd_revalidate(struct gendisk *disk)
1718 {
1719         struct ub_lun *lun = disk->private_data;
1720
1721         ub_revalidate(lun->udev, lun);
1722
1723         /* XXX Support sector size switching like in sr.c */
1724         blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
1725         set_capacity(disk, lun->capacity.nsec);
1726         /* set_disk_ro(disk, lun->readonly); */
1727
1728         return 0;
1729 }
1730
1731 /*
1732  * The check is called by the block layer to verify if the media
1733  * is still available. It is supposed to be harmless, lightweight and
1734  * non-intrusive in case the media was not changed.
1735  *
1736  * This call can sleep.
1737  *
1738  * The return code is bool!
1739  */
1740 static int ub_bd_media_changed(struct gendisk *disk)
1741 {
1742         struct ub_lun *lun = disk->private_data;
1743
1744         if (!lun->removable)
1745                 return 0;
1746
1747         /*
1748          * We always clear checks after every command, so this is not as
1749          * dangerous as it looks. If the TEST_UNIT_READY fails here, the
1750          * device genuinely is not ready, and operator or software
1751          * intervention is required. One dangerous case is a drive which
1752          * spins itself down: come the time to write dirty pages, the
1753          * write fails and the block layer discards the data. Since we never
1754          * spin drives up, such devices simply cannot be used with ub anyway.
1755          */
1756         if (ub_sync_tur(lun->udev, lun) != 0) {
1757                 lun->changed = 1;
1758                 return 1;
1759         }
1760
1761         return lun->changed;
1762 }
1763
1764 static struct block_device_operations ub_bd_fops = {
1765         .owner          = THIS_MODULE,
1766         .open           = ub_bd_open,
1767         .release        = ub_bd_release,
1768         .ioctl          = ub_bd_ioctl,
1769         .media_changed  = ub_bd_media_changed,
1770         .revalidate_disk = ub_bd_revalidate,
1771 };
1772
1773 /*
1774  * Common ->done routine for commands executed synchronously.
1775  */
1776 static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1777 {
1778         struct completion *cop = cmd->back;
1779         complete(cop);
1780 }
1781
1782 /*
1783  * Test if the device has a check condition on it, synchronously.
1784  */
1785 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
1786 {
1787         struct ub_scsi_cmd *cmd;
1788         enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
1789         unsigned long flags;
1790         struct completion compl;
1791         int rc;
1792
1793         init_completion(&compl);
1794
1795         rc = -ENOMEM;
1796         if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1797                 goto err_alloc;
1798
1799         cmd->cdb[0] = TEST_UNIT_READY;
1800         cmd->cdb_len = 6;
1801         cmd->dir = UB_DIR_NONE;
1802         cmd->state = UB_CMDST_INIT;
1803         cmd->lun = lun;                 /* This may be NULL, but that's ok */
1804         cmd->done = ub_probe_done;
1805         cmd->back = &compl;
1806
1807         spin_lock_irqsave(sc->lock, flags);
1808         cmd->tag = sc->tagcnt++;
1809
1810         rc = ub_submit_scsi(sc, cmd);
1811         spin_unlock_irqrestore(sc->lock, flags);
1812
1813         if (rc != 0)
1814                 goto err_submit;
1815
1816         wait_for_completion(&compl);
1817
1818         rc = cmd->error;
1819
1820         if (rc == -EIO && cmd->key != 0)        /* Retries for benh's key */
1821                 rc = cmd->key;
1822
1823 err_submit:
1824         kfree(cmd);
1825 err_alloc:
1826         return rc;
1827 }
1828
1829 /*
1830  * Read the SCSI capacity synchronously (for probing).
1831  */
1832 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
1833     struct ub_capacity *ret)
1834 {
1835         struct ub_scsi_cmd *cmd;
1836         struct scatterlist *sg;
1837         char *p;
1838         enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
1839         unsigned long flags;
1840         unsigned int bsize, shift;
1841         unsigned long nsec;
1842         struct completion compl;
1843         int rc;
1844
1845         init_completion(&compl);
1846
1847         rc = -ENOMEM;
1848         if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1849                 goto err_alloc;
1850         p = (char *)cmd + sizeof(struct ub_scsi_cmd);
1851
1852         cmd->cdb[0] = 0x25;             /* READ CAPACITY(10) */
1853         cmd->cdb_len = 10;
1854         cmd->dir = UB_DIR_READ;
1855         cmd->state = UB_CMDST_INIT;
1856         cmd->nsg = 1;
1857         sg = &cmd->sgv[0];
1858         sg->page = virt_to_page(p);
1859         sg->offset = (unsigned long)p & (PAGE_SIZE-1);
1860         sg->length = 8;
1861         cmd->len = 8;
1862         cmd->lun = lun;
1863         cmd->done = ub_probe_done;
1864         cmd->back = &compl;
1865
1866         spin_lock_irqsave(sc->lock, flags);
1867         cmd->tag = sc->tagcnt++;
1868
1869         rc = ub_submit_scsi(sc, cmd);
1870         spin_unlock_irqrestore(sc->lock, flags);
1871
1872         if (rc != 0)
1873                 goto err_submit;
1874
1875         wait_for_completion(&compl);
1876
1877         if (cmd->error != 0) {
1878                 rc = -EIO;
1879                 goto err_read;
1880         }
1881         if (cmd->act_len != 8) {
1882                 rc = -EIO;
1883                 goto err_read;
1884         }
1885
1886         /* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
1887         nsec = be32_to_cpu(*(__be32 *)p) + 1;
1888         bsize = be32_to_cpu(*(__be32 *)(p + 4));
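             /*
              * For reference, the READ CAPACITY(10) reply is 8 bytes: the
              * big-endian LBA of the last block in bytes 0-3 (hence the +1
              * above to get a block count) and the big-endian block length
              * in bytes 4-7.
              */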
1889         switch (bsize) {
1890         case 512:       shift = 0;      break;
1891         case 1024:      shift = 1;      break;
1892         case 2048:      shift = 2;      break;
1893         case 4096:      shift = 3;      break;
1894         default:
1895                 rc = -EDOM;
1896                 goto err_inv_bsize;
1897         }
1898
1899         ret->bsize = bsize;
1900         ret->bshift = shift;
1901         ret->nsec = nsec << shift;
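             /*
              * Note that nsec is kept in 512-byte sectors, the unit
              * set_capacity expects; a device with 2048-byte blocks thus
              * reports four sectors per device block.
              */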
1902         rc = 0;
1903
1904 err_inv_bsize:
1905 err_read:
1906 err_submit:
1907         kfree(cmd);
1908 err_alloc:
1909         return rc;
1910 }
1911
1912 /*
1913  */
1914 static void ub_probe_urb_complete(struct urb *urb, struct pt_regs *pt)
1915 {
1916         struct completion *cop = urb->context;
1917         complete(cop);
1918 }
1919
1920 static void ub_probe_timeout(unsigned long arg)
1921 {
1922         struct completion *cop = (struct completion *) arg;
1923         complete(cop);
1924 }
1925
1926 /*
1927  * Reset with a Bulk reset.
1928  */
1929 static int ub_sync_reset(struct ub_dev *sc)
1930 {
1931         int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
1932         struct usb_ctrlrequest *cr;
1933         struct completion compl;
1934         struct timer_list timer;
1935         int rc;
1936
1937         init_completion(&compl);
1938
1939         cr = &sc->work_cr;
1940         cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
1941         cr->bRequest = US_BULK_RESET_REQUEST;
1942         cr->wValue = cpu_to_le16(0);
1943         cr->wIndex = cpu_to_le16(ifnum);
1944         cr->wLength = cpu_to_le16(0);
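             /*
              * For reference, this is the Bulk-Only Mass Storage Reset of
              * the USB mass storage class spec; the setup packet on the
              * wire is
              *   21 FF 00 00 <ifnum> 00 00 00
              */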
1945
1946         usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1947             (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
1948         sc->work_urb.actual_length = 0;
1949         sc->work_urb.error_count = 0;
1950         sc->work_urb.status = 0;
1951
1952         if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
1953                 printk(KERN_WARNING
1954                      "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
1955                 return rc;
1956         }
1957
1958         init_timer(&timer);
1959         timer.function = ub_probe_timeout;
1960         timer.data = (unsigned long) &compl;
1961         timer.expires = jiffies + UB_CTRL_TIMEOUT;
1962         add_timer(&timer);
1963
1964         wait_for_completion(&compl);
1965
1966         del_timer_sync(&timer);
1967         usb_kill_urb(&sc->work_urb);
1968
1969         return sc->work_urb.status;
1970 }
1971
1972 /*
1973  * Get number of LUNs by the way of Bulk GetMaxLUN command.
1974  */
1975 static int ub_sync_getmaxlun(struct ub_dev *sc)
1976 {
1977         int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
1978         unsigned char *p;
1979         enum { ALLOC_SIZE = 1 };
1980         struct usb_ctrlrequest *cr;
1981         struct completion compl;
1982         struct timer_list timer;
1983         int nluns;
1984         int rc;
1985
1986         init_completion(&compl);
1987
1988         rc = -ENOMEM;
1989         if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1990                 goto err_alloc;
1991         *p = 55;                /* Arbitrary sentinel; checked against the reply below */
1992
1993         cr = &sc->work_cr;
1994         cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
1995         cr->bRequest = US_BULK_GET_MAX_LUN;
1996         cr->wValue = cpu_to_le16(0);
1997         cr->wIndex = cpu_to_le16(ifnum);
1998         cr->wLength = cpu_to_le16(1);
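             /*
              * For reference, this is Get Max LUN from the Bulk-Only spec;
              * the setup packet is
              *   A1 FE 00 00 <ifnum> 00 01 00
              * and the device answers with a single byte holding the
              * highest LUN number, so the count is that value plus one.
              * Many single-LUN devices simply stall this request, which is
              * why a failure here is not fatal.
              */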
1999
2000         usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
2001             (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
2002         sc->work_urb.actual_length = 0;
2003         sc->work_urb.error_count = 0;
2004         sc->work_urb.status = 0;
2005
2006         if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
2007                 goto err_submit;
2008
2009         init_timer(&timer);
2010         timer.function = ub_probe_timeout;
2011         timer.data = (unsigned long) &compl;
2012         timer.expires = jiffies + UB_CTRL_TIMEOUT;
2013         add_timer(&timer);
2014
2015         wait_for_completion(&compl);
2016
2017         del_timer_sync(&timer);
2018         usb_kill_urb(&sc->work_urb);
2019
2020         if ((rc = sc->work_urb.status) < 0)
2021                 goto err_io;
2022
2023         if (sc->work_urb.actual_length != 1) {
2024                 nluns = 0;
2025         } else {
2026                 if ((nluns = *p) == 55) {
2027                         nluns = 0;
2028                 } else {
2029                         /* GetMaxLUN returns the maximum LUN number */
2030                         nluns += 1;
2031                         if (nluns > UB_MAX_LUNS)
2032                                 nluns = UB_MAX_LUNS;
2033                 }
2034         }
2035
2036         kfree(p);
2037         return nluns;
2038
2039 err_io:
2040 err_submit:
2041         kfree(p);
2042 err_alloc:
2043         return rc;
2044 }
2045
2046 /*
2047  * Clear initial stalls.
2048  */
2049 static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
2050 {
2051         int endp;
2052         struct usb_ctrlrequest *cr;
2053         struct completion compl;
2054         struct timer_list timer;
2055         int rc;
2056
2057         init_completion(&compl);
2058
2059         endp = usb_pipeendpoint(stalled_pipe);
2060         if (usb_pipein(stalled_pipe))
2061                 endp |= USB_DIR_IN;
2062
2063         cr = &sc->work_cr;
2064         cr->bRequestType = USB_RECIP_ENDPOINT;
2065         cr->bRequest = USB_REQ_CLEAR_FEATURE;
2066         cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
2067         cr->wIndex = cpu_to_le16(endp);
2068         cr->wLength = cpu_to_le16(0);
2069
2070         usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
2071             (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
2072         sc->work_urb.actual_length = 0;
2073         sc->work_urb.error_count = 0;
2074         sc->work_urb.status = 0;
2075
2076         if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2077                 printk(KERN_WARNING
2078                      "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
2079                 return rc;
2080         }
2081
2082         init_timer(&timer);
2083         timer.function = ub_probe_timeout;
2084         timer.data = (unsigned long) &compl;
2085         timer.expires = jiffies + UB_CTRL_TIMEOUT;
2086         add_timer(&timer);
2087
2088         wait_for_completion(&compl);
2089
2090         del_timer_sync(&timer);
2091         usb_kill_urb(&sc->work_urb);
2092
2093         /* reset the endpoint toggle */
2094         usb_settoggle(sc->dev, usb_pipeendpoint(stalled_pipe), usb_pipeout(stalled_pipe), 0);
2095
2096         return 0;
2097 }
2098
2099 /*
2100  * Get the pipe settings.
2101  */
2102 static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
2103     struct usb_interface *intf)
2104 {
2105         struct usb_host_interface *altsetting = intf->cur_altsetting;
2106         struct usb_endpoint_descriptor *ep_in = NULL;
2107         struct usb_endpoint_descriptor *ep_out = NULL;
2108         struct usb_endpoint_descriptor *ep;
2109         int i;
2110
2111         /*
2112          * Find the endpoints we need.
2113          * We are expecting a minimum of 2 endpoints - in and out (bulk).
2114          * We will ignore any others.
2115          */
2116         for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
2117                 ep = &altsetting->endpoint[i].desc;
2118
2119                 /* Is it a BULK endpoint? */
2120                 if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
2121                                 == USB_ENDPOINT_XFER_BULK) {
2122                         /* BULK in or out? */
2123                         if (ep->bEndpointAddress & USB_DIR_IN)
2124                                 ep_in = ep;
2125                         else
2126                                 ep_out = ep;
2127                 }
2128         }
2129
2130         if (ep_in == NULL || ep_out == NULL) {
2131                 printk(KERN_NOTICE "%s: failed endpoint check\n",
2132                     sc->name);
2133                 return -ENODEV;
2134         }
2135
2136         /* Calculate and store the pipe values */
2137         sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
2138         sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
2139         sc->send_bulk_pipe = usb_sndbulkpipe(dev,
2140                 ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2141         sc->recv_bulk_pipe = usb_rcvbulkpipe(dev, 
2142                 ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
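             /*
              * A "pipe" is just an unsigned int packing the transfer type,
              * device address, endpoint number and direction; it carries
              * everything the usb_fill_*_urb calls need later on.
              */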
2143
2144         return 0;
2145 }
2146
2147 /*
2148  * Probing is done in the process context, which allows us to cheat
2149  * and not build a state machine for the discovery.
2150  */
2151 static int ub_probe(struct usb_interface *intf,
2152     const struct usb_device_id *dev_id)
2153 {
2154         struct ub_dev *sc;
2155         int nluns;
2156         int rc;
2157         int i;
2158
2159         if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
2160                 return -ENXIO;
2161
2162         rc = -ENOMEM;
2163         if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
2164                 goto err_core;
2165         sc->lock = ub_next_lock();
2166         INIT_LIST_HEAD(&sc->luns);
2167         usb_init_urb(&sc->work_urb);
2168         tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
2169         atomic_set(&sc->poison, 0);
2170         INIT_WORK(&sc->reset_work, ub_reset_task, sc);
2171         init_waitqueue_head(&sc->reset_wait);
2172
2173         init_timer(&sc->work_timer);
2174         sc->work_timer.data = (unsigned long) sc;
2175         sc->work_timer.function = ub_urb_timeout;
2176
2177         ub_init_completion(&sc->work_done);
2178         sc->work_done.done = 1;         /* A little yuk, but oh well... */
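             /*
              * Presumably, pre-setting ->done only keeps a wait that happens
              * before any URB was ever submitted (e.g. on an early
              * disconnect) from blocking forever; every real submission
              * re-arms the completion with UB_INIT_COMPLETION first, as
              * ub_submit_clear_stall does above.
              */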
2179
2180         sc->dev = interface_to_usbdev(intf);
2181         sc->intf = intf;
2182         // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
2183         usb_set_intfdata(intf, sc);
2184         usb_get_dev(sc->dev);
2185         /*
2186          * Since we give the interface struct to the block level through
2187          * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
2188          * oopses on close after a disconnect (kernels 2.6.16 and up).
2189          */
2190         usb_get_intf(sc->intf);
2191
2192         snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
2193             sc->dev->bus->busnum, sc->dev->devnum);
2194
2195         /* XXX Verify that we can handle the device (from descriptors) */
2196
2197         if (ub_get_pipes(sc, sc->dev, intf) != 0)
2198                 goto err_dev_desc;
2199
2200         /*
2201          * At this point, all USB initialization is done, do upper layer.
2202          * We really hate halfway initialized structures, so from the
2203          * invariants perspective, this ub_dev is fully constructed at
2204          * this point.
2205          */
2206
2207         /*
2208          * This is needed to clear toggles. It is a problem only if we do
2209          * `rmmod ub && modprobe ub` without disconnects, but we like that.
2210          */
2211 #if 0 /* iPod Mini fails if we do this (big white iPod works) */
2212         ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
2213         ub_probe_clear_stall(sc, sc->send_bulk_pipe);
2214 #endif
2215
2216         /*
2217          * The way this is used by the startup code is a little specific.
2218          * A SCSI check causes a USB stall. Our common case code sees it
2219          * and clears the check, after which the device is ready for use.
2220          * But if a check was not present, any command other than
2221          * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
2222          *
2223          * If we neglect to clear the SCSI check, the first real command fails
2224  * (which is the capacity readout). We could clear that and retry, but why
2225  * cause spurious retries for no reason?
2226          *
2227          * Revalidation may start with its own TEST_UNIT_READY, but that one
2228          * has to succeed, so we clear checks with an additional one here.
2229  * In any case it's not our business how revalidation is implemented.
2230          */
2231         for (i = 0; i < 3; i++) {  /* Retries for the schwag key from KS'04 */
2232                 if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
2233                 if (rc != 0x6) break;   /* 0x6 is the UNIT ATTENTION sense key */
2234                 msleep(10);
2235         }
2236
2237         nluns = 1;
2238         for (i = 0; i < 3; i++) {
2239                 if ((rc = ub_sync_getmaxlun(sc)) < 0)
2240                         break;
2241                 if (rc != 0) {
2242                         nluns = rc;
2243                         break;
2244                 }
2245                 msleep(100);
2246         }
2247
2248         for (i = 0; i < nluns; i++) {
2249                 ub_probe_lun(sc, i);
2250         }
2251         return 0;
2252
2253 err_dev_desc:
2254         usb_set_intfdata(intf, NULL);
2255         usb_put_intf(sc->intf);
2256         usb_put_dev(sc->dev);
2257         kfree(sc);
2258 err_core:
2259         return rc;
2260 }
2261
2262 static int ub_probe_lun(struct ub_dev *sc, int lnum)
2263 {
2264         struct ub_lun *lun;
2265         request_queue_t *q;
2266         struct gendisk *disk;
2267         int rc;
2268
2269         rc = -ENOMEM;
2270         if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
2271                 goto err_alloc;
2272         lun->num = lnum;
2273
2274         rc = -ENOSR;
2275         if ((lun->id = ub_id_get()) == -1)
2276                 goto err_id;
2277
2278         lun->udev = sc;
2279
2280         snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
2281             lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);
2282
2283         lun->removable = 1;             /* XXX Query this from the device */
2284         lun->changed = 1;               /* Only ub_revalidate clears this */
2285         ub_revalidate(sc, lun);
2286
2287         rc = -ENOMEM;
2288         if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
2289                 goto err_diskalloc;
2290
2291         sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
2292         disk->major = UB_MAJOR;
2293         disk->first_minor = lun->id * UB_PARTS_PER_LUN;
2294         disk->fops = &ub_bd_fops;
2295         disk->private_data = lun;
2296         disk->driverfs_dev = &sc->intf->dev;
2297
2298         rc = -ENOMEM;
2299         if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
2300                 goto err_blkqinit;
2301
2302         disk->queue = q;
2303
2304         blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
2305         blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
2306         blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
2307         blk_queue_segment_boundary(q, 0xffffffff);      /* Dubious. */
2308         blk_queue_max_sectors(q, UB_MAX_SECTORS);
2309         blk_queue_hardsect_size(q, lun->capacity.bsize);
2310
2311         lun->disk = disk;
2312         q->queuedata = lun;
2313         list_add(&lun->link, &sc->luns);
2314
2315         set_capacity(disk, lun->capacity.nsec);
2316         if (lun->removable)
2317                 disk->flags |= GENHD_FL_REMOVABLE;
2318
2319         add_disk(disk);
2320
2321         return 0;
2322
2323 err_blkqinit:
2324         put_disk(disk);
2325 err_diskalloc:
2326         ub_id_put(lun->id);
2327 err_id:
2328         kfree(lun);
2329 err_alloc:
2330         return rc;
2331 }
2332
2333 static void ub_disconnect(struct usb_interface *intf)
2334 {
2335         struct ub_dev *sc = usb_get_intfdata(intf);
2336         struct list_head *p;
2337         struct ub_lun *lun;
2338         unsigned long flags;
2339
2340         /*
2341          * Prevent ub_bd_release from pulling the rug from under us.
2342          * XXX This is starting to look like a kref.
2343          * XXX Why not take this ref at probe time?
2344          */
2345         spin_lock_irqsave(&ub_lock, flags);
2346         sc->openc++;
2347         spin_unlock_irqrestore(&ub_lock, flags);
2348
2349         /*
2350          * Fence stall clearings, operations triggered by unlinkings and so on.
2351          * We do not attempt to unlink any URBs, because we do not trust the
2352          * unlink paths in HC drivers. Also, we get -84 (-EILSEQ) upon disconnect anyway.
2353          */
2354         atomic_set(&sc->poison, 1);
2355
2356         /*
2357          * Wait for reset to end, if any.
2358          */
2359         wait_event(sc->reset_wait, !sc->reset);
2360
2361         /*
2362          * Blow away queued commands.
2363          *
2364          * Actually, this never works, because before we get here
2365          * the HCD terminates outstanding URB(s). It causes our
2366          * SCSI command queue to advance, commands fail to submit,
2367          * and the whole queue drains. So, we just use this code to
2368          * print warnings.
2369          */
2370         spin_lock_irqsave(sc->lock, flags);
2371         {
2372                 struct ub_scsi_cmd *cmd;
2373                 int cnt = 0;
2374                 while ((cmd = ub_cmdq_peek(sc)) != NULL) {
2375                         cmd->error = -ENOTCONN;
2376                         cmd->state = UB_CMDST_DONE;
2377                         ub_cmdq_pop(sc);
2378                         (*cmd->done)(sc, cmd);
2379                         cnt++;
2380                 }
2381                 if (cnt != 0) {
2382                         printk(KERN_WARNING "%s: "
2383                             "%d were queued after shutdown\n", sc->name, cnt);
2384                 }
2385         }
2386         spin_unlock_irqrestore(sc->lock, flags);
2387
2388         /*
2389          * Unregister the upper layer.
2390          */
2391         list_for_each (p, &sc->luns) {
2392                 lun = list_entry(p, struct ub_lun, link);
2393                 del_gendisk(lun->disk);
2394                 /*
2395                  * I wish I could do:
2396                  *    set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
2397                  * As it is, we rely on our internal poisoning and let
2398                  * the upper levels to spin furiously failing all the I/O.
2399                  */
2400         }
2401
2402         /*
2403          * Testing for -EINPROGRESS is always a bug, so we are bending
2404          * the rules a little.
2405          */
2406         spin_lock_irqsave(sc->lock, flags);
2407         if (sc->work_urb.status == -EINPROGRESS) {      /* janitors: ignore */
2408                 printk(KERN_WARNING "%s: "
2409                     "URB is active after disconnect\n", sc->name);
2410         }
2411         spin_unlock_irqrestore(sc->lock, flags);
2412
2413         /*
2414          * There is virtually no chance that another CPU is still running this
2415          * long after ub_urb_complete should have called del_timer, provided
2416          * the HCD did not forget to deliver a callback on unlink.
2417          */
2418         del_timer_sync(&sc->work_timer);
2419
2420         /*
2421          * At this point there must be no commands coming from anyone
2422          * and no URBs left in transit.
2423          */
2424
2425         ub_put(sc);
2426 }
2427
2428 static struct usb_driver ub_driver = {
2429         .name =         "ub",
2430         .probe =        ub_probe,
2431         .disconnect =   ub_disconnect,
2432         .id_table =     ub_usb_ids,
2433 };
2434
2435 static int __init ub_init(void)
2436 {
2437         int rc;
2438         int i;
2439
2440         for (i = 0; i < UB_QLOCK_NUM; i++)
2441                 spin_lock_init(&ub_qlockv[i]);
2442
2443         if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
2444                 goto err_regblkdev;
2445
2446         if ((rc = usb_register(&ub_driver)) != 0)
2447                 goto err_register;
2448
2449         usb_usual_set_present(USB_US_TYPE_UB);
2450         return 0;
2451
2452 err_register:
2453         unregister_blkdev(UB_MAJOR, DRV_NAME);
2454 err_regblkdev:
2455         return rc;
2456 }
2457
2458 static void __exit ub_exit(void)
2459 {
2460         usb_deregister(&ub_driver);
2461
2462         unregister_blkdev(UB_MAJOR, DRV_NAME);
2463         usb_usual_clear_present(USB_US_TYPE_UB);
2464 }
2465
2466 module_init(ub_init);
2467 module_exit(ub_exit);
2468
2469 MODULE_LICENSE("GPL");