libertas: slight cleanup of netif queue stop/wake
[linux-2.6] / drivers / block / ub.c
1 /*
2  * The low performance USB storage driver (ub).
3  *
4  * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
5  * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com)
6  *
7  * This work is a part of Linux kernel, is derived from it,
8  * and is not licensed separately. See file COPYING for details.
9  *
10  * TODO (sorted by decreasing priority)
11  *  -- set readonly flag for CDs, set removable flag for CF readers
12  *  -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
13  *  -- verify the 13 conditions and do bulk resets
14  *  -- highmem
15  *  -- move top_sense and work_bcs into separate allocations (if they survive)
16  *     for cache purists and esoteric architectures.
17  *  -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
18  *  -- prune comments, they are too volumnous
19  *  -- Resove XXX's
20  *  -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
21  */
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/usb.h>
25 #include <linux/usb_usual.h>
26 #include <linux/blkdev.h>
27 #include <linux/timer.h>
28 #include <linux/scatterlist.h>
29 #include <scsi/scsi.h>
30
31 #define DRV_NAME "ub"
32
33 #define UB_MAJOR 180
34
35 /*
36  * The command state machine is the key model for understanding of this driver.
37  *
38  * The general rule is that all transitions are done towards the bottom
39  * of the diagram, thus preventing any loops.
40  *
41  * An exception to that is how the STAT state is handled. A counter allows it
42  * to be re-entered along the path marked with [C].
43  *
44  *       +--------+
45  *       ! INIT   !
46  *       +--------+
47  *           !
48  *        ub_scsi_cmd_start fails ->--------------------------------------\
49  *           !                                                            !
50  *           V                                                            !
51  *       +--------+                                                       !
52  *       ! CMD    !                                                       !
53  *       +--------+                                                       !
54  *           !                                            +--------+      !
55  *         was -EPIPE -->-------------------------------->! CLEAR  !      !
56  *           !                                            +--------+      !
57  *           !                                                !           !
58  *         was error -->------------------------------------- ! --------->\
59  *           !                                                !           !
60  *  /--<-- cmd->dir == NONE ?                                 !           !
61  *  !        !                                                !           !
62  *  !        V                                                !           !
63  *  !    +--------+                                           !           !
64  *  !    ! DATA   !                                           !           !
65  *  !    +--------+                                           !           !
66  *  !        !                           +---------+          !           !
67  *  !      was -EPIPE -->--------------->! CLR2STS !          !           !
68  *  !        !                           +---------+          !           !
69  *  !        !                                !               !           !
70  *  !        !                              was error -->---- ! --------->\
71  *  !      was error -->--------------------- ! ------------- ! --------->\
72  *  !        !                                !               !           !
73  *  !        V                                !               !           !
74  *  \--->+--------+                           !               !           !
75  *       ! STAT   !<--------------------------/               !           !
76  *  /--->+--------+                                           !           !
77  *  !        !                                                !           !
78  * [C]     was -EPIPE -->-----------\                         !           !
79  *  !        !                      !                         !           !
80  *  +<---- len == 0                 !                         !           !
81  *  !        !                      !                         !           !
82  *  !      was error -->--------------------------------------!---------->\
83  *  !        !                      !                         !           !
84  *  +<---- bad CSW                  !                         !           !
85  *  +<---- bad tag                  !                         !           !
86  *  !        !                      V                         !           !
87  *  !        !                 +--------+                     !           !
88  *  !        !                 ! CLRRS  !                     !           !
89  *  !        !                 +--------+                     !           !
90  *  !        !                      !                         !           !
91  *  \------- ! --------------------[C]--------\               !           !
92  *           !                                !               !           !
93  *         cmd->error---\                +--------+           !           !
94  *           !          +--------------->! SENSE  !<----------/           !
95  *         STAT_FAIL----/                +--------+                       !
96  *           !                                !                           V
97  *           !                                V                      +--------+
98  *           \--------------------------------\--------------------->! DONE   !
99  *                                                                   +--------+
100  */
101
102 /*
103  * This many LUNs per USB device.
104  * Every one of them takes a host, see UB_MAX_HOSTS.
105  */
106 #define UB_MAX_LUNS   9
107
108 /*
109  */
110
111 #define UB_PARTS_PER_LUN      8
112
113 #define UB_MAX_CDB_SIZE      16         /* Corresponds to Bulk */
114
115 #define UB_SENSE_SIZE  18
116
117 /*
118  */
119
120 /* command block wrapper */
121 struct bulk_cb_wrap {
122         __le32  Signature;              /* contains 'USBC' */
123         u32     Tag;                    /* unique per command id */
124         __le32  DataTransferLength;     /* size of data */
125         u8      Flags;                  /* direction in bit 0 */
126         u8      Lun;                    /* LUN */
127         u8      Length;                 /* of of the CDB */
128         u8      CDB[UB_MAX_CDB_SIZE];   /* max command */
129 };
130
131 #define US_BULK_CB_WRAP_LEN     31
132 #define US_BULK_CB_SIGN         0x43425355      /*spells out USBC */
133 #define US_BULK_FLAG_IN         1
134 #define US_BULK_FLAG_OUT        0
135
136 /* command status wrapper */
137 struct bulk_cs_wrap {
138         __le32  Signature;              /* should = 'USBS' */
139         u32     Tag;                    /* same as original command */
140         __le32  Residue;                /* amount not transferred */
141         u8      Status;                 /* see below */
142 };
143
144 #define US_BULK_CS_WRAP_LEN     13
145 #define US_BULK_CS_SIGN         0x53425355      /* spells out 'USBS' */
146 #define US_BULK_STAT_OK         0
147 #define US_BULK_STAT_FAIL       1
148 #define US_BULK_STAT_PHASE      2
149
150 /* bulk-only class specific requests */
151 #define US_BULK_RESET_REQUEST   0xff
152 #define US_BULK_GET_MAX_LUN     0xfe
153
154 /*
155  */
156 struct ub_dev;
157
158 #define UB_MAX_REQ_SG   9       /* cdrecord requires 32KB and maybe a header */
159 #define UB_MAX_SECTORS 64
160
161 /*
162  * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
163  * even if a webcam hogs the bus, but some devices need time to spin up.
164  */
165 #define UB_URB_TIMEOUT  (HZ*2)
166 #define UB_DATA_TIMEOUT (HZ*5)  /* ZIP does spin-ups in the data phase */
167 #define UB_STAT_TIMEOUT (HZ*5)  /* Same spinups and eject for a dataless cmd. */
168 #define UB_CTRL_TIMEOUT (HZ/2)  /* 500ms ought to be enough to clear a stall */
169
170 /*
171  * An instance of a SCSI command in transit.
172  */
173 #define UB_DIR_NONE     0
174 #define UB_DIR_READ     1
175 #define UB_DIR_ILLEGAL2 2
176 #define UB_DIR_WRITE    3
177
178 #define UB_DIR_CHAR(c)  (((c)==UB_DIR_WRITE)? 'w': \
179                          (((c)==UB_DIR_READ)? 'r': 'n'))
180
181 enum ub_scsi_cmd_state {
182         UB_CMDST_INIT,                  /* Initial state */
183         UB_CMDST_CMD,                   /* Command submitted */
184         UB_CMDST_DATA,                  /* Data phase */
185         UB_CMDST_CLR2STS,               /* Clearing before requesting status */
186         UB_CMDST_STAT,                  /* Status phase */
187         UB_CMDST_CLEAR,                 /* Clearing a stall (halt, actually) */
188         UB_CMDST_CLRRS,                 /* Clearing before retrying status */
189         UB_CMDST_SENSE,                 /* Sending Request Sense */
190         UB_CMDST_DONE                   /* Final state */
191 };
192
193 struct ub_scsi_cmd {
194         unsigned char cdb[UB_MAX_CDB_SIZE];
195         unsigned char cdb_len;
196
197         unsigned char dir;              /* 0 - none, 1 - read, 3 - write. */
198         enum ub_scsi_cmd_state state;
199         unsigned int tag;
200         struct ub_scsi_cmd *next;
201
202         int error;                      /* Return code - valid upon done */
203         unsigned int act_len;           /* Return size */
204         unsigned char key, asc, ascq;   /* May be valid if error==-EIO */
205
206         int stat_count;                 /* Retries getting status. */
207
208         unsigned int len;               /* Requested length */
209         unsigned int current_sg;
210         unsigned int nsg;               /* sgv[nsg] */
211         struct scatterlist sgv[UB_MAX_REQ_SG];
212
213         struct ub_lun *lun;
214         void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
215         void *back;
216 };
217
218 struct ub_request {
219         struct request *rq;
220         unsigned int current_try;
221         unsigned int nsg;               /* sgv[nsg] */
222         struct scatterlist sgv[UB_MAX_REQ_SG];
223 };
224
225 /*
226  */
227 struct ub_capacity {
228         unsigned long nsec;             /* Linux size - 512 byte sectors */
229         unsigned int bsize;             /* Linux hardsect_size */
230         unsigned int bshift;            /* Shift between 512 and hard sects */
231 };
232
233 /*
234  * This is a direct take-off from linux/include/completion.h
235  * The difference is that I do not wait on this thing, just poll.
236  * When I want to wait (ub_probe), I just use the stock completion.
237  *
238  * Note that INIT_COMPLETION takes no lock. It is correct. But why
239  * in the bloody hell that thing takes struct instead of pointer to struct
240  * is quite beyond me. I just copied it from the stock completion.
241  */
242 struct ub_completion {
243         unsigned int done;
244         spinlock_t lock;
245 };
246
247 static inline void ub_init_completion(struct ub_completion *x)
248 {
249         x->done = 0;
250         spin_lock_init(&x->lock);
251 }
252
253 #define UB_INIT_COMPLETION(x)   ((x).done = 0)
254
255 static void ub_complete(struct ub_completion *x)
256 {
257         unsigned long flags;
258
259         spin_lock_irqsave(&x->lock, flags);
260         x->done++;
261         spin_unlock_irqrestore(&x->lock, flags);
262 }
263
264 static int ub_is_completed(struct ub_completion *x)
265 {
266         unsigned long flags;
267         int ret;
268
269         spin_lock_irqsave(&x->lock, flags);
270         ret = x->done;
271         spin_unlock_irqrestore(&x->lock, flags);
272         return ret;
273 }
274
275 /*
276  */
277 struct ub_scsi_cmd_queue {
278         int qlen, qmax;
279         struct ub_scsi_cmd *head, *tail;
280 };
281
282 /*
283  * The block device instance (one per LUN).
284  */
285 struct ub_lun {
286         struct ub_dev *udev;
287         struct list_head link;
288         struct gendisk *disk;
289         int id;                         /* Host index */
290         int num;                        /* LUN number */
291         char name[16];
292
293         int changed;                    /* Media was changed */
294         int removable;
295         int readonly;
296
297         struct ub_request urq;
298
299         /* Use Ingo's mempool if or when we have more than one command. */
300         /*
301          * Currently we never need more than one command for the whole device.
302          * However, giving every LUN a command is a cheap and automatic way
303          * to enforce fairness between them.
304          */
305         int cmda[1];
306         struct ub_scsi_cmd cmdv[1];
307
308         struct ub_capacity capacity; 
309 };
310
311 /*
312  * The USB device instance.
313  */
314 struct ub_dev {
315         spinlock_t *lock;
316         atomic_t poison;                /* The USB device is disconnected */
317         int openc;                      /* protected by ub_lock! */
318                                         /* kref is too implicit for our taste */
319         int reset;                      /* Reset is running */
320         unsigned int tagcnt;
321         char name[12];
322         struct usb_device *dev;
323         struct usb_interface *intf;
324
325         struct list_head luns;
326
327         unsigned int send_bulk_pipe;    /* cached pipe values */
328         unsigned int recv_bulk_pipe;
329         unsigned int send_ctrl_pipe;
330         unsigned int recv_ctrl_pipe;
331
332         struct tasklet_struct tasklet;
333
334         struct ub_scsi_cmd_queue cmd_queue;
335         struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */
336         unsigned char top_sense[UB_SENSE_SIZE];
337
338         struct ub_completion work_done;
339         struct urb work_urb;
340         struct timer_list work_timer;
341         int last_pipe;                  /* What might need clearing */
342         __le32 signature;               /* Learned signature */
343         struct bulk_cb_wrap work_bcb;
344         struct bulk_cs_wrap work_bcs;
345         struct usb_ctrlrequest work_cr;
346
347         struct work_struct reset_work;
348         wait_queue_head_t reset_wait;
349
350         int sg_stat[6];
351 };
352
353 /*
354  */
355 static void ub_cleanup(struct ub_dev *sc);
356 static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
357 static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
358     struct ub_scsi_cmd *cmd, struct ub_request *urq);
359 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
360     struct ub_scsi_cmd *cmd, struct ub_request *urq);
361 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
362 static void ub_end_rq(struct request *rq, unsigned int status);
363 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
364     struct ub_request *urq, struct ub_scsi_cmd *cmd);
365 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
366 static void ub_urb_complete(struct urb *urb);
367 static void ub_scsi_action(unsigned long _dev);
368 static void ub_scsi_dispatch(struct ub_dev *sc);
369 static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
370 static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
371 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
372 static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
373 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
374 static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
375 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
376 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
377     int stalled_pipe);
378 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
379 static void ub_reset_enter(struct ub_dev *sc, int try);
380 static void ub_reset_task(struct work_struct *work);
381 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
382 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
383     struct ub_capacity *ret);
384 static int ub_sync_reset(struct ub_dev *sc);
385 static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
386 static int ub_probe_lun(struct ub_dev *sc, int lnum);
387
388 /*
389  */
390 #ifdef CONFIG_USB_LIBUSUAL
391
392 #define ub_usb_ids  storage_usb_ids
393 #else
394
395 static struct usb_device_id ub_usb_ids[] = {
396         { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
397         { }
398 };
399
400 MODULE_DEVICE_TABLE(usb, ub_usb_ids);
401 #endif /* CONFIG_USB_LIBUSUAL */
402
403 /*
404  * Find me a way to identify "next free minor" for add_disk(),
405  * and the array disappears the next day. However, the number of
406  * hosts has something to do with the naming and /proc/partitions.
407  * This has to be thought out in detail before changing.
408  * If UB_MAX_HOST was 1000, we'd use a bitmap. Or a better data structure.
409  */
410 #define UB_MAX_HOSTS  26
411 static char ub_hostv[UB_MAX_HOSTS];
412
413 #define UB_QLOCK_NUM 5
414 static spinlock_t ub_qlockv[UB_QLOCK_NUM];
415 static int ub_qlock_next = 0;
416
417 static DEFINE_SPINLOCK(ub_lock);        /* Locks globals and ->openc */
418
419 /*
420  * The id allocator.
421  *
422  * This also stores the host for indexing by minor, which is somewhat dirty.
423  */
424 static int ub_id_get(void)
425 {
426         unsigned long flags;
427         int i;
428
429         spin_lock_irqsave(&ub_lock, flags);
430         for (i = 0; i < UB_MAX_HOSTS; i++) {
431                 if (ub_hostv[i] == 0) {
432                         ub_hostv[i] = 1;
433                         spin_unlock_irqrestore(&ub_lock, flags);
434                         return i;
435                 }
436         }
437         spin_unlock_irqrestore(&ub_lock, flags);
438         return -1;
439 }
440
441 static void ub_id_put(int id)
442 {
443         unsigned long flags;
444
445         if (id < 0 || id >= UB_MAX_HOSTS) {
446                 printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
447                 return;
448         }
449
450         spin_lock_irqsave(&ub_lock, flags);
451         if (ub_hostv[id] == 0) {
452                 spin_unlock_irqrestore(&ub_lock, flags);
453                 printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
454                 return;
455         }
456         ub_hostv[id] = 0;
457         spin_unlock_irqrestore(&ub_lock, flags);
458 }
459
460 /*
461  * This is necessitated by the fact that blk_cleanup_queue does not
462  * necesserily destroy the queue. Instead, it may merely decrease q->refcnt.
463  * Since our blk_init_queue() passes a spinlock common with ub_dev,
464  * we have life time issues when ub_cleanup frees ub_dev.
465  */
466 static spinlock_t *ub_next_lock(void)
467 {
468         unsigned long flags;
469         spinlock_t *ret;
470
471         spin_lock_irqsave(&ub_lock, flags);
472         ret = &ub_qlockv[ub_qlock_next];
473         ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
474         spin_unlock_irqrestore(&ub_lock, flags);
475         return ret;
476 }
477
478 /*
479  * Downcount for deallocation. This rides on two assumptions:
480  *  - once something is poisoned, its refcount cannot grow
481  *  - opens cannot happen at this time (del_gendisk was done)
482  * If the above is true, we can drop the lock, which we need for
483  * blk_cleanup_queue(): the silly thing may attempt to sleep.
484  * [Actually, it never needs to sleep for us, but it calls might_sleep()]
485  */
486 static void ub_put(struct ub_dev *sc)
487 {
488         unsigned long flags;
489
490         spin_lock_irqsave(&ub_lock, flags);
491         --sc->openc;
492         if (sc->openc == 0 && atomic_read(&sc->poison)) {
493                 spin_unlock_irqrestore(&ub_lock, flags);
494                 ub_cleanup(sc);
495         } else {
496                 spin_unlock_irqrestore(&ub_lock, flags);
497         }
498 }
499
500 /*
501  * Final cleanup and deallocation.
502  */
503 static void ub_cleanup(struct ub_dev *sc)
504 {
505         struct list_head *p;
506         struct ub_lun *lun;
507         struct request_queue *q;
508
509         while (!list_empty(&sc->luns)) {
510                 p = sc->luns.next;
511                 lun = list_entry(p, struct ub_lun, link);
512                 list_del(p);
513
514                 /* I don't think queue can be NULL. But... Stolen from sx8.c */
515                 if ((q = lun->disk->queue) != NULL)
516                         blk_cleanup_queue(q);
517                 /*
518                  * If we zero disk->private_data BEFORE put_disk, we have
519                  * to check for NULL all over the place in open, release,
520                  * check_media and revalidate, because the block level
521                  * semaphore is well inside the put_disk.
522                  * But we cannot zero after the call, because *disk is gone.
523                  * The sd.c is blatantly racy in this area.
524                  */
525                 /* disk->private_data = NULL; */
526                 put_disk(lun->disk);
527                 lun->disk = NULL;
528
529                 ub_id_put(lun->id);
530                 kfree(lun);
531         }
532
533         usb_set_intfdata(sc->intf, NULL);
534         usb_put_intf(sc->intf);
535         usb_put_dev(sc->dev);
536         kfree(sc);
537 }
538
539 /*
540  * The "command allocator".
541  */
542 static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
543 {
544         struct ub_scsi_cmd *ret;
545
546         if (lun->cmda[0])
547                 return NULL;
548         ret = &lun->cmdv[0];
549         lun->cmda[0] = 1;
550         return ret;
551 }
552
553 static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
554 {
555         if (cmd != &lun->cmdv[0]) {
556                 printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
557                     lun->name, cmd);
558                 return;
559         }
560         if (!lun->cmda[0]) {
561                 printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
562                 return;
563         }
564         lun->cmda[0] = 0;
565 }
566
567 /*
568  * The command queue.
569  */
570 static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
571 {
572         struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
573
574         if (t->qlen++ == 0) {
575                 t->head = cmd;
576                 t->tail = cmd;
577         } else {
578                 t->tail->next = cmd;
579                 t->tail = cmd;
580         }
581
582         if (t->qlen > t->qmax)
583                 t->qmax = t->qlen;
584 }
585
586 static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
587 {
588         struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
589
590         if (t->qlen++ == 0) {
591                 t->head = cmd;
592                 t->tail = cmd;
593         } else {
594                 cmd->next = t->head;
595                 t->head = cmd;
596         }
597
598         if (t->qlen > t->qmax)
599                 t->qmax = t->qlen;
600 }
601
602 static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
603 {
604         struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
605         struct ub_scsi_cmd *cmd;
606
607         if (t->qlen == 0)
608                 return NULL;
609         if (--t->qlen == 0)
610                 t->tail = NULL;
611         cmd = t->head;
612         t->head = cmd->next;
613         cmd->next = NULL;
614         return cmd;
615 }
616
617 #define ub_cmdq_peek(sc)  ((sc)->cmd_queue.head)
618
619 /*
620  * The request function is our main entry point
621  */
622
623 static void ub_request_fn(struct request_queue *q)
624 {
625         struct ub_lun *lun = q->queuedata;
626         struct request *rq;
627
628         while ((rq = elv_next_request(q)) != NULL) {
629                 if (ub_request_fn_1(lun, rq) != 0) {
630                         blk_stop_queue(q);
631                         break;
632                 }
633         }
634 }
635
636 static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
637 {
638         struct ub_dev *sc = lun->udev;
639         struct ub_scsi_cmd *cmd;
640         struct ub_request *urq;
641         int n_elem;
642
643         if (atomic_read(&sc->poison)) {
644                 blkdev_dequeue_request(rq);
645                 ub_end_rq(rq, DID_NO_CONNECT << 16);
646                 return 0;
647         }
648
649         if (lun->changed && !blk_pc_request(rq)) {
650                 blkdev_dequeue_request(rq);
651                 ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
652                 return 0;
653         }
654
655         if (lun->urq.rq != NULL)
656                 return -1;
657         if ((cmd = ub_get_cmd(lun)) == NULL)
658                 return -1;
659         memset(cmd, 0, sizeof(struct ub_scsi_cmd));
660         sg_init_table(cmd->sgv, UB_MAX_REQ_SG);
661
662         blkdev_dequeue_request(rq);
663
664         urq = &lun->urq;
665         memset(urq, 0, sizeof(struct ub_request));
666         urq->rq = rq;
667
668         /*
669          * get scatterlist from block layer
670          */
671         n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
672         if (n_elem < 0) {
673                 /* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
674                 printk(KERN_INFO "%s: failed request map (%d)\n",
675                     lun->name, n_elem);
676                 goto drop;
677         }
678         if (n_elem > UB_MAX_REQ_SG) {   /* Paranoia */
679                 printk(KERN_WARNING "%s: request with %d segments\n",
680                     lun->name, n_elem);
681                 goto drop;
682         }
683         urq->nsg = n_elem;
684         sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
685
686         if (blk_pc_request(rq)) {
687                 ub_cmd_build_packet(sc, lun, cmd, urq);
688         } else {
689                 ub_cmd_build_block(sc, lun, cmd, urq);
690         }
691         cmd->state = UB_CMDST_INIT;
692         cmd->lun = lun;
693         cmd->done = ub_rw_cmd_done;
694         cmd->back = urq;
695
696         cmd->tag = sc->tagcnt++;
697         if (ub_submit_scsi(sc, cmd) != 0)
698                 goto drop;
699
700         return 0;
701
702 drop:
703         ub_put_cmd(lun, cmd);
704         ub_end_rq(rq, DID_ERROR << 16);
705         return 0;
706 }
707
708 static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
709     struct ub_scsi_cmd *cmd, struct ub_request *urq)
710 {
711         struct request *rq = urq->rq;
712         unsigned int block, nblks;
713
714         if (rq_data_dir(rq) == WRITE)
715                 cmd->dir = UB_DIR_WRITE;
716         else
717                 cmd->dir = UB_DIR_READ;
718
719         cmd->nsg = urq->nsg;
720         memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
721
722         /*
723          * build the command
724          *
725          * The call to blk_queue_hardsect_size() guarantees that request
726          * is aligned, but it is given in terms of 512 byte units, always.
727          */
728         block = rq->sector >> lun->capacity.bshift;
729         nblks = rq->nr_sectors >> lun->capacity.bshift;
730
731         cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
732         /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
733         cmd->cdb[2] = block >> 24;
734         cmd->cdb[3] = block >> 16;
735         cmd->cdb[4] = block >> 8;
736         cmd->cdb[5] = block;
737         cmd->cdb[7] = nblks >> 8;
738         cmd->cdb[8] = nblks;
739         cmd->cdb_len = 10;
740
741         cmd->len = rq->nr_sectors * 512;
742 }
743
744 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
745     struct ub_scsi_cmd *cmd, struct ub_request *urq)
746 {
747         struct request *rq = urq->rq;
748
749         if (rq->data_len == 0) {
750                 cmd->dir = UB_DIR_NONE;
751         } else {
752                 if (rq_data_dir(rq) == WRITE)
753                         cmd->dir = UB_DIR_WRITE;
754                 else
755                         cmd->dir = UB_DIR_READ;
756         }
757
758         cmd->nsg = urq->nsg;
759         memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
760
761         memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
762         cmd->cdb_len = rq->cmd_len;
763
764         cmd->len = rq->data_len;
765 }
766
767 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
768 {
769         struct ub_lun *lun = cmd->lun;
770         struct ub_request *urq = cmd->back;
771         struct request *rq;
772         unsigned int scsi_status;
773
774         rq = urq->rq;
775
776         if (cmd->error == 0) {
777                 if (blk_pc_request(rq)) {
778                         if (cmd->act_len >= rq->data_len)
779                                 rq->data_len = 0;
780                         else
781                                 rq->data_len -= cmd->act_len;
782                 }
783                 scsi_status = 0;
784         } else {
785                 if (blk_pc_request(rq)) {
786                         /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
787                         memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
788                         rq->sense_len = UB_SENSE_SIZE;
789                         if (sc->top_sense[0] != 0)
790                                 scsi_status = SAM_STAT_CHECK_CONDITION;
791                         else
792                                 scsi_status = DID_ERROR << 16;
793                 } else {
794                         if (cmd->error == -EIO) {
795                                 if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
796                                         return;
797                         }
798                         scsi_status = SAM_STAT_CHECK_CONDITION;
799                 }
800         }
801
802         urq->rq = NULL;
803
804         ub_put_cmd(lun, cmd);
805         ub_end_rq(rq, scsi_status);
806         blk_start_queue(lun->disk->queue);
807 }
808
809 static void ub_end_rq(struct request *rq, unsigned int scsi_status)
810 {
811         int error;
812
813         if (scsi_status == 0) {
814                 error = 0;
815         } else {
816                 error = -EIO;
817                 rq->errors = scsi_status;
818         }
819         if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
820                 BUG();
821 }
822
823 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
824     struct ub_request *urq, struct ub_scsi_cmd *cmd)
825 {
826
827         if (atomic_read(&sc->poison))
828                 return -ENXIO;
829
830         ub_reset_enter(sc, urq->current_try);
831
832         if (urq->current_try >= 3)
833                 return -EIO;
834         urq->current_try++;
835
836         /* Remove this if anyone complains of flooding. */
837         printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
838             "[sense %x %02x %02x] retry %d\n",
839             sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
840             cmd->key, cmd->asc, cmd->ascq, urq->current_try);
841
842         memset(cmd, 0, sizeof(struct ub_scsi_cmd));
843         ub_cmd_build_block(sc, lun, cmd, urq);
844
845         cmd->state = UB_CMDST_INIT;
846         cmd->lun = lun;
847         cmd->done = ub_rw_cmd_done;
848         cmd->back = urq;
849
850         cmd->tag = sc->tagcnt++;
851
852 #if 0 /* Wasteful */
853         return ub_submit_scsi(sc, cmd);
854 #else
855         ub_cmdq_add(sc, cmd);
856         return 0;
857 #endif
858 }
859
860 /*
861  * Submit a regular SCSI operation (not an auto-sense).
862  *
863  * The Iron Law of Good Submit Routine is:
864  * Zero return - callback is done, Nonzero return - callback is not done.
865  * No exceptions.
866  *
867  * Host is assumed locked.
868  */
869 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
870 {
871
872         if (cmd->state != UB_CMDST_INIT ||
873             (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
874                 return -EINVAL;
875         }
876
877         ub_cmdq_add(sc, cmd);
878         /*
879          * We can call ub_scsi_dispatch(sc) right away here, but it's a little
880          * safer to jump to a tasklet, in case upper layers do something silly.
881          */
882         tasklet_schedule(&sc->tasklet);
883         return 0;
884 }
885
886 /*
887  * Submit the first URB for the queued command.
888  * This function does not deal with queueing in any way.
889  */
890 static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
891 {
892         struct bulk_cb_wrap *bcb;
893         int rc;
894
895         bcb = &sc->work_bcb;
896
897         /*
898          * ``If the allocation length is eighteen or greater, and a device
899          * server returns less than eithteen bytes of data, the application
900          * client should assume that the bytes not transferred would have been
901          * zeroes had the device server returned those bytes.''
902          *
903          * We zero sense for all commands so that when a packet request
904          * fails it does not return a stale sense.
905          */
906         memset(&sc->top_sense, 0, UB_SENSE_SIZE);
907
908         /* set up the command wrapper */
909         bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
910         bcb->Tag = cmd->tag;            /* Endianness is not important */
911         bcb->DataTransferLength = cpu_to_le32(cmd->len);
912         bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
913         bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
914         bcb->Length = cmd->cdb_len;
915
916         /* copy the command payload */
917         memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);
918
919         UB_INIT_COMPLETION(sc->work_done);
920
921         sc->last_pipe = sc->send_bulk_pipe;
922         usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
923             bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
924
925         /* Fill what we shouldn't be filling, because usb-storage did so. */
926         sc->work_urb.actual_length = 0;
927         sc->work_urb.error_count = 0;
928         sc->work_urb.status = 0;
929
930         if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
931                 /* XXX Clear stalls */
932                 ub_complete(&sc->work_done);
933                 return rc;
934         }
935
936         sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
937         add_timer(&sc->work_timer);
938
939         cmd->state = UB_CMDST_CMD;
940         return 0;
941 }
942
943 /*
944  * Timeout handler.
945  */
946 static void ub_urb_timeout(unsigned long arg)
947 {
948         struct ub_dev *sc = (struct ub_dev *) arg;
949         unsigned long flags;
950
951         spin_lock_irqsave(sc->lock, flags);
952         if (!ub_is_completed(&sc->work_done))
953                 usb_unlink_urb(&sc->work_urb);
954         spin_unlock_irqrestore(sc->lock, flags);
955 }
956
957 /*
958  * Completion routine for the work URB.
959  *
960  * This can be called directly from usb_submit_urb (while we have
961  * the sc->lock taken) and from an interrupt (while we do NOT have
962  * the sc->lock taken). Therefore, bounce this off to a tasklet.
963  */
964 static void ub_urb_complete(struct urb *urb)
965 {
966         struct ub_dev *sc = urb->context;
967
968         ub_complete(&sc->work_done);
969         tasklet_schedule(&sc->tasklet);
970 }
971
972 static void ub_scsi_action(unsigned long _dev)
973 {
974         struct ub_dev *sc = (struct ub_dev *) _dev;
975         unsigned long flags;
976
977         spin_lock_irqsave(sc->lock, flags);
978         ub_scsi_dispatch(sc);
979         spin_unlock_irqrestore(sc->lock, flags);
980 }
981
982 static void ub_scsi_dispatch(struct ub_dev *sc)
983 {
984         struct ub_scsi_cmd *cmd;
985         int rc;
986
987         while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
988                 if (cmd->state == UB_CMDST_DONE) {
989                         ub_cmdq_pop(sc);
990                         (*cmd->done)(sc, cmd);
991                 } else if (cmd->state == UB_CMDST_INIT) {
992                         if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
993                                 break;
994                         cmd->error = rc;
995                         cmd->state = UB_CMDST_DONE;
996                 } else {
997                         if (!ub_is_completed(&sc->work_done))
998                                 break;
999                         del_timer(&sc->work_timer);
1000                         ub_scsi_urb_compl(sc, cmd);
1001                 }
1002         }
1003 }
1004
1005 static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1006 {
1007         struct urb *urb = &sc->work_urb;
1008         struct bulk_cs_wrap *bcs;
1009         int len;
1010         int rc;
1011
1012         if (atomic_read(&sc->poison)) {
1013                 ub_state_done(sc, cmd, -ENODEV);
1014                 return;
1015         }
1016
1017         if (cmd->state == UB_CMDST_CLEAR) {
1018                 if (urb->status == -EPIPE) {
1019                         /*
1020                          * STALL while clearning STALL.
1021                          * The control pipe clears itself - nothing to do.
1022                          */
1023                         printk(KERN_NOTICE "%s: stall on control pipe\n",
1024                             sc->name);
1025                         goto Bad_End;
1026                 }
1027
1028                 /*
1029                  * We ignore the result for the halt clear.
1030                  */
1031
1032                 /* reset the endpoint toggle */
1033                 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
1034                         usb_pipeout(sc->last_pipe), 0);
1035
1036                 ub_state_sense(sc, cmd);
1037
1038         } else if (cmd->state == UB_CMDST_CLR2STS) {
1039                 if (urb->status == -EPIPE) {
1040                         printk(KERN_NOTICE "%s: stall on control pipe\n",
1041                             sc->name);
1042                         goto Bad_End;
1043                 }
1044
1045                 /*
1046                  * We ignore the result for the halt clear.
1047                  */
1048
1049                 /* reset the endpoint toggle */
1050                 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
1051                         usb_pipeout(sc->last_pipe), 0);
1052
1053                 ub_state_stat(sc, cmd);
1054
1055         } else if (cmd->state == UB_CMDST_CLRRS) {
1056                 if (urb->status == -EPIPE) {
1057                         printk(KERN_NOTICE "%s: stall on control pipe\n",
1058                             sc->name);
1059                         goto Bad_End;
1060                 }
1061
1062                 /*
1063                  * We ignore the result for the halt clear.
1064                  */
1065
1066                 /* reset the endpoint toggle */
1067                 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
1068                         usb_pipeout(sc->last_pipe), 0);
1069
1070                 ub_state_stat_counted(sc, cmd);
1071
1072         } else if (cmd->state == UB_CMDST_CMD) {
1073                 switch (urb->status) {
1074                 case 0:
1075                         break;
1076                 case -EOVERFLOW:
1077                         goto Bad_End;
1078                 case -EPIPE:
1079                         rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1080                         if (rc != 0) {
1081                                 printk(KERN_NOTICE "%s: "
1082                                     "unable to submit clear (%d)\n",
1083                                     sc->name, rc);
1084                                 /*
1085                                  * This is typically ENOMEM or some other such shit.
1086                                  * Retrying is pointless. Just do Bad End on it...
1087                                  */
1088                                 ub_state_done(sc, cmd, rc);
1089                                 return;
1090                         }
1091                         cmd->state = UB_CMDST_CLEAR;
1092                         return;
1093                 case -ESHUTDOWN:        /* unplug */
1094                 case -EILSEQ:           /* unplug timeout on uhci */
1095                         ub_state_done(sc, cmd, -ENODEV);
1096                         return;
1097                 default:
1098                         goto Bad_End;
1099                 }
1100                 if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
1101                         goto Bad_End;
1102                 }
1103
1104                 if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
1105                         ub_state_stat(sc, cmd);
1106                         return;
1107                 }
1108
1109                 // udelay(125);         // usb-storage has this
1110                 ub_data_start(sc, cmd);
1111
1112         } else if (cmd->state == UB_CMDST_DATA) {
1113                 if (urb->status == -EPIPE) {
1114                         rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1115                         if (rc != 0) {
1116                                 printk(KERN_NOTICE "%s: "
1117                                     "unable to submit clear (%d)\n",
1118                                     sc->name, rc);
1119                                 ub_state_done(sc, cmd, rc);
1120                                 return;
1121                         }
1122                         cmd->state = UB_CMDST_CLR2STS;
1123                         return;
1124                 }
1125                 if (urb->status == -EOVERFLOW) {
1126                         /*
1127                          * A babble? Failure, but we must transfer CSW now.
1128                          */
1129                         cmd->error = -EOVERFLOW;        /* A cheap trick... */
1130                         ub_state_stat(sc, cmd);
1131                         return;
1132                 }
1133
1134                 if (cmd->dir == UB_DIR_WRITE) {
1135                         /*
1136                          * Do not continue writes in case of a failure.
1137                          * Doing so would cause sectors to be mixed up,
1138                          * which is worse than sectors lost.
1139                          *
1140                          * We must try to read the CSW, or many devices
1141                          * get confused.
1142                          */
1143                         len = urb->actual_length;
1144                         if (urb->status != 0 ||
1145                             len != cmd->sgv[cmd->current_sg].length) {
1146                                 cmd->act_len += len;
1147
1148                                 cmd->error = -EIO;
1149                                 ub_state_stat(sc, cmd);
1150                                 return;
1151                         }
1152
1153                 } else {
1154                         /*
1155                          * If an error occurs on read, we record it, and
1156                          * continue to fetch data in order to avoid bubble.
1157                          *
1158                          * As a small shortcut, we stop if we detect that
1159                          * a CSW mixed into data.
1160                          */
1161                         if (urb->status != 0)
1162                                 cmd->error = -EIO;
1163
1164                         len = urb->actual_length;
1165                         if (urb->status != 0 ||
1166                             len != cmd->sgv[cmd->current_sg].length) {
1167                                 if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
1168                                         goto Bad_End;
1169                         }
1170                 }
1171
1172                 cmd->act_len += urb->actual_length;
1173
1174                 if (++cmd->current_sg < cmd->nsg) {
1175                         ub_data_start(sc, cmd);
1176                         return;
1177                 }
1178                 ub_state_stat(sc, cmd);
1179
1180         } else if (cmd->state == UB_CMDST_STAT) {
1181                 if (urb->status == -EPIPE) {
1182                         rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1183                         if (rc != 0) {
1184                                 printk(KERN_NOTICE "%s: "
1185                                     "unable to submit clear (%d)\n",
1186                                     sc->name, rc);
1187                                 ub_state_done(sc, cmd, rc);
1188                                 return;
1189                         }
1190
1191                         /*
1192                          * Having a stall when getting CSW is an error, so
1193                          * make sure uppper levels are not oblivious to it.
1194                          */
1195                         cmd->error = -EIO;              /* A cheap trick... */
1196
1197                         cmd->state = UB_CMDST_CLRRS;
1198                         return;
1199                 }
1200
1201                 /* Catch everything, including -EOVERFLOW and other nasties. */
1202                 if (urb->status != 0)
1203                         goto Bad_End;
1204
1205                 if (urb->actual_length == 0) {
1206                         ub_state_stat_counted(sc, cmd);
1207                         return;
1208                 }
1209
1210                 /*
1211                  * Check the returned Bulk protocol status.
1212                  * The status block has to be validated first.
1213                  */
1214
1215                 bcs = &sc->work_bcs;
1216
1217                 if (sc->signature == cpu_to_le32(0)) {
1218                         /*
1219                          * This is the first reply, so do not perform the check.
1220                          * Instead, remember the signature the device uses
1221                          * for future checks. But do not allow a nul.
1222                          */
1223                         sc->signature = bcs->Signature;
1224                         if (sc->signature == cpu_to_le32(0)) {
1225                                 ub_state_stat_counted(sc, cmd);
1226                                 return;
1227                         }
1228                 } else {
1229                         if (bcs->Signature != sc->signature) {
1230                                 ub_state_stat_counted(sc, cmd);
1231                                 return;
1232                         }
1233                 }
1234
1235                 if (bcs->Tag != cmd->tag) {
1236                         /*
1237                          * This usually happens when we disagree with the
1238                          * device's microcode about something. For instance,
1239                          * a few of them throw this after timeouts. They buffer
1240                          * commands and reply at commands we timed out before.
1241                          * Without flushing these replies we loop forever.
1242                          */
1243                         ub_state_stat_counted(sc, cmd);
1244                         return;
1245                 }
1246
1247                 len = le32_to_cpu(bcs->Residue);
1248                 if (len != cmd->len - cmd->act_len) {
1249                         /*
1250                          * It is all right to transfer less, the caller has
1251                          * to check. But it's not all right if the device
1252                          * counts disagree with our counts.
1253                          */
1254                         goto Bad_End;
1255                 }
1256
1257                 switch (bcs->Status) {
1258                 case US_BULK_STAT_OK:
1259                         break;
1260                 case US_BULK_STAT_FAIL:
1261                         ub_state_sense(sc, cmd);
1262                         return;
1263                 case US_BULK_STAT_PHASE:
1264                         goto Bad_End;
1265                 default:
1266                         printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
1267                             sc->name, bcs->Status);
1268                         ub_state_done(sc, cmd, -EINVAL);
1269                         return;
1270                 }
1271
1272                 /* Not zeroing error to preserve a babble indicator */
1273                 if (cmd->error != 0) {
1274                         ub_state_sense(sc, cmd);
1275                         return;
1276                 }
1277                 cmd->state = UB_CMDST_DONE;
1278                 ub_cmdq_pop(sc);
1279                 (*cmd->done)(sc, cmd);
1280
1281         } else if (cmd->state == UB_CMDST_SENSE) {
1282                 ub_state_done(sc, cmd, -EIO);
1283
1284         } else {
1285                 printk(KERN_WARNING "%s: "
1286                     "wrong command state %d\n",
1287                     sc->name, cmd->state);
1288                 ub_state_done(sc, cmd, -EINVAL);
1289                 return;
1290         }
1291         return;
1292
1293 Bad_End: /* Little Excel is dead */
1294         ub_state_done(sc, cmd, -EIO);
1295 }
1296
1297 /*
1298  * Factorization helper for the command state machine:
1299  * Initiate a data segment transfer.
1300  */
1301 static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1302 {
1303         struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
1304         int pipe;
1305         int rc;
1306
1307         UB_INIT_COMPLETION(sc->work_done);
1308
1309         if (cmd->dir == UB_DIR_READ)
1310                 pipe = sc->recv_bulk_pipe;
1311         else
1312                 pipe = sc->send_bulk_pipe;
1313         sc->last_pipe = pipe;
1314         usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
1315             sg->length, ub_urb_complete, sc);
1316         sc->work_urb.actual_length = 0;
1317         sc->work_urb.error_count = 0;
1318         sc->work_urb.status = 0;
1319
1320         if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1321                 /* XXX Clear stalls */
1322                 ub_complete(&sc->work_done);
1323                 ub_state_done(sc, cmd, rc);
1324                 return;
1325         }
1326
1327         sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
1328         add_timer(&sc->work_timer);
1329
1330         cmd->state = UB_CMDST_DATA;
1331 }
1332
1333 /*
1334  * Factorization helper for the command state machine:
1335  * Finish the command.
1336  */
1337 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
1338 {
1339
1340         cmd->error = rc;
1341         cmd->state = UB_CMDST_DONE;
1342         ub_cmdq_pop(sc);
1343         (*cmd->done)(sc, cmd);
1344 }
1345
1346 /*
1347  * Factorization helper for the command state machine:
1348  * Submit a CSW read.
1349  */
1350 static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1351 {
1352         int rc;
1353
1354         UB_INIT_COMPLETION(sc->work_done);
1355
1356         sc->last_pipe = sc->recv_bulk_pipe;
1357         usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
1358             &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
1359         sc->work_urb.actual_length = 0;
1360         sc->work_urb.error_count = 0;
1361         sc->work_urb.status = 0;
1362
1363         if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1364                 /* XXX Clear stalls */
1365                 ub_complete(&sc->work_done);
1366                 ub_state_done(sc, cmd, rc);
1367                 return -1;
1368         }
1369
1370         sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
1371         add_timer(&sc->work_timer);
1372         return 0;
1373 }
1374
1375 /*
1376  * Factorization helper for the command state machine:
1377  * Submit a CSW read and go to STAT state.
1378  */
1379 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1380 {
1381
1382         if (__ub_state_stat(sc, cmd) != 0)
1383                 return;
1384
1385         cmd->stat_count = 0;
1386         cmd->state = UB_CMDST_STAT;
1387 }
1388
1389 /*
1390  * Factorization helper for the command state machine:
1391  * Submit a CSW read and go to STAT state with counter (along [C] path).
1392  */
1393 static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1394 {
1395
1396         if (++cmd->stat_count >= 4) {
1397                 ub_state_sense(sc, cmd);
1398                 return;
1399         }
1400
1401         if (__ub_state_stat(sc, cmd) != 0)
1402                 return;
1403
1404         cmd->state = UB_CMDST_STAT;
1405 }
1406
1407 /*
1408  * Factorization helper for the command state machine:
1409  * Submit a REQUEST SENSE and go to SENSE state.
1410  */
1411 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1412 {
1413         struct ub_scsi_cmd *scmd;
1414         struct scatterlist *sg;
1415         int rc;
1416
1417         if (cmd->cdb[0] == REQUEST_SENSE) {
1418                 rc = -EPIPE;
1419                 goto error;
1420         }
1421
1422         scmd = &sc->top_rqs_cmd;
1423         memset(scmd, 0, sizeof(struct ub_scsi_cmd));
1424         scmd->cdb[0] = REQUEST_SENSE;
1425         scmd->cdb[4] = UB_SENSE_SIZE;
1426         scmd->cdb_len = 6;
1427         scmd->dir = UB_DIR_READ;
1428         scmd->state = UB_CMDST_INIT;
1429         scmd->nsg = 1;
1430         sg = &scmd->sgv[0];
1431         sg_init_table(sg, UB_MAX_REQ_SG);
1432         sg_set_page(sg, virt_to_page(sc->top_sense), UB_SENSE_SIZE,
1433                         (unsigned long)sc->top_sense & (PAGE_SIZE-1));
1434         scmd->len = UB_SENSE_SIZE;
1435         scmd->lun = cmd->lun;
1436         scmd->done = ub_top_sense_done;
1437         scmd->back = cmd;
1438
1439         scmd->tag = sc->tagcnt++;
1440
1441         cmd->state = UB_CMDST_SENSE;
1442
1443         ub_cmdq_insert(sc, scmd);
1444         return;
1445
1446 error:
1447         ub_state_done(sc, cmd, rc);
1448 }
1449
1450 /*
1451  * A helper for the command's state machine:
1452  * Submit a stall clear.
1453  */
1454 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
1455     int stalled_pipe)
1456 {
1457         int endp;
1458         struct usb_ctrlrequest *cr;
1459         int rc;
1460
1461         endp = usb_pipeendpoint(stalled_pipe);
1462         if (usb_pipein (stalled_pipe))
1463                 endp |= USB_DIR_IN;
1464
1465         cr = &sc->work_cr;
1466         cr->bRequestType = USB_RECIP_ENDPOINT;
1467         cr->bRequest = USB_REQ_CLEAR_FEATURE;
1468         cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
1469         cr->wIndex = cpu_to_le16(endp);
1470         cr->wLength = cpu_to_le16(0);
1471
1472         UB_INIT_COMPLETION(sc->work_done);
1473
1474         usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1475             (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
1476         sc->work_urb.actual_length = 0;
1477         sc->work_urb.error_count = 0;
1478         sc->work_urb.status = 0;
1479
1480         if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1481                 ub_complete(&sc->work_done);
1482                 return rc;
1483         }
1484
1485         sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
1486         add_timer(&sc->work_timer);
1487         return 0;
1488 }
1489
1490 /*
1491  */
1492 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
1493 {
1494         unsigned char *sense = sc->top_sense;
1495         struct ub_scsi_cmd *cmd;
1496
1497         /*
1498          * Find the command which triggered the unit attention or a check,
1499          * save the sense into it, and advance its state machine.
1500          */
1501         if ((cmd = ub_cmdq_peek(sc)) == NULL) {
1502                 printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
1503                 return;
1504         }
1505         if (cmd != scmd->back) {
1506                 printk(KERN_WARNING "%s: "
1507                     "sense done for wrong command 0x%x\n",
1508                     sc->name, cmd->tag);
1509                 return;
1510         }
1511         if (cmd->state != UB_CMDST_SENSE) {
1512                 printk(KERN_WARNING "%s: "
1513                     "sense done with bad cmd state %d\n",
1514                     sc->name, cmd->state);
1515                 return;
1516         }
1517
1518         /*
1519          * Ignoring scmd->act_len, because the buffer was pre-zeroed.
1520          */
1521         cmd->key = sense[2] & 0x0F;
1522         cmd->asc = sense[12];
1523         cmd->ascq = sense[13];
1524
1525         ub_scsi_urb_compl(sc, cmd);
1526 }
1527
1528 /*
1529  * Reset management
1530  * XXX Move usb_reset_device to khubd. Hogging kevent is not a good thing.
1531  * XXX Make usb_sync_reset asynchronous.
1532  */
1533
1534 static void ub_reset_enter(struct ub_dev *sc, int try)
1535 {
1536
1537         if (sc->reset) {
1538                 /* This happens often on multi-LUN devices. */
1539                 return;
1540         }
1541         sc->reset = try + 1;
1542
1543 #if 0 /* Not needed because the disconnect waits for us. */
1544         unsigned long flags;
1545         spin_lock_irqsave(&ub_lock, flags);
1546         sc->openc++;
1547         spin_unlock_irqrestore(&ub_lock, flags);
1548 #endif
1549
1550 #if 0 /* We let them stop themselves. */
1551         struct ub_lun *lun;
1552         list_for_each_entry(lun, &sc->luns, link) {
1553                 blk_stop_queue(lun->disk->queue);
1554         }
1555 #endif
1556
1557         schedule_work(&sc->reset_work);
1558 }
1559
1560 static void ub_reset_task(struct work_struct *work)
1561 {
1562         struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
1563         unsigned long flags;
1564         struct ub_lun *lun;
1565         int lkr, rc;
1566
1567         if (!sc->reset) {
1568                 printk(KERN_WARNING "%s: Running reset unrequested\n",
1569                     sc->name);
1570                 return;
1571         }
1572
1573         if (atomic_read(&sc->poison)) {
1574                 ;
1575         } else if ((sc->reset & 1) == 0) {
1576                 ub_sync_reset(sc);
1577                 msleep(700);    /* usb-storage sleeps 6s (!) */
1578                 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
1579                 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
1580         } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
1581                 ;
1582         } else {
1583                 if ((lkr = usb_lock_device_for_reset(sc->dev, sc->intf)) < 0) {
1584                         printk(KERN_NOTICE
1585                             "%s: usb_lock_device_for_reset failed (%d)\n",
1586                             sc->name, lkr);
1587                 } else {
1588                         rc = usb_reset_device(sc->dev);
1589                         if (rc < 0) {
1590                                 printk(KERN_NOTICE "%s: "
1591                                     "usb_lock_device_for_reset failed (%d)\n",
1592                                     sc->name, rc);
1593                         }
1594
1595                         if (lkr)
1596                                 usb_unlock_device(sc->dev);
1597                 }
1598         }
1599
1600         /*
1601          * In theory, no commands can be running while reset is active,
1602          * so nobody can ask for another reset, and so we do not need any
1603          * queues of resets or anything. We do need a spinlock though,
1604          * to interact with block layer.
1605          */
1606         spin_lock_irqsave(sc->lock, flags);
1607         sc->reset = 0;
1608         tasklet_schedule(&sc->tasklet);
1609         list_for_each_entry(lun, &sc->luns, link) {
1610                 blk_start_queue(lun->disk->queue);
1611         }
1612         wake_up(&sc->reset_wait);
1613         spin_unlock_irqrestore(sc->lock, flags);
1614 }
1615
1616 /*
1617  * This is called from a process context.
1618  */
1619 static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
1620 {
1621
1622         lun->readonly = 0;      /* XXX Query this from the device */
1623
1624         lun->capacity.nsec = 0;
1625         lun->capacity.bsize = 512;
1626         lun->capacity.bshift = 0;
1627
1628         if (ub_sync_tur(sc, lun) != 0)
1629                 return;                 /* Not ready */
1630         lun->changed = 0;
1631
1632         if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1633                 /*
1634                  * The retry here means something is wrong, either with the
1635                  * device, with the transport, or with our code.
1636                  * We keep this because sd.c has retries for capacity.
1637                  */
1638                 if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1639                         lun->capacity.nsec = 0;
1640                         lun->capacity.bsize = 512;
1641                         lun->capacity.bshift = 0;
1642                 }
1643         }
1644 }
1645
1646 /*
1647  * The open funcion.
1648  * This is mostly needed to keep refcounting, but also to support
1649  * media checks on removable media drives.
1650  */
1651 static int ub_bd_open(struct inode *inode, struct file *filp)
1652 {
1653         struct gendisk *disk = inode->i_bdev->bd_disk;
1654         struct ub_lun *lun = disk->private_data;
1655         struct ub_dev *sc = lun->udev;
1656         unsigned long flags;
1657         int rc;
1658
1659         spin_lock_irqsave(&ub_lock, flags);
1660         if (atomic_read(&sc->poison)) {
1661                 spin_unlock_irqrestore(&ub_lock, flags);
1662                 return -ENXIO;
1663         }
1664         sc->openc++;
1665         spin_unlock_irqrestore(&ub_lock, flags);
1666
1667         if (lun->removable || lun->readonly)
1668                 check_disk_change(inode->i_bdev);
1669
1670         /*
1671          * The sd.c considers ->media_present and ->changed not equivalent,
1672          * under some pretty murky conditions (a failure of READ CAPACITY).
1673          * We may need it one day.
1674          */
1675         if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) {
1676                 rc = -ENOMEDIUM;
1677                 goto err_open;
1678         }
1679
1680         if (lun->readonly && (filp->f_mode & FMODE_WRITE)) {
1681                 rc = -EROFS;
1682                 goto err_open;
1683         }
1684
1685         return 0;
1686
1687 err_open:
1688         ub_put(sc);
1689         return rc;
1690 }
1691
1692 /*
1693  */
1694 static int ub_bd_release(struct inode *inode, struct file *filp)
1695 {
1696         struct gendisk *disk = inode->i_bdev->bd_disk;
1697         struct ub_lun *lun = disk->private_data;
1698         struct ub_dev *sc = lun->udev;
1699
1700         ub_put(sc);
1701         return 0;
1702 }
1703
1704 /*
1705  * The ioctl interface.
1706  */
1707 static int ub_bd_ioctl(struct inode *inode, struct file *filp,
1708     unsigned int cmd, unsigned long arg)
1709 {
1710         struct gendisk *disk = inode->i_bdev->bd_disk;
1711         void __user *usermem = (void __user *) arg;
1712
1713         return scsi_cmd_ioctl(filp, disk->queue, disk, cmd, usermem);
1714 }
1715
1716 /*
1717  * This is called once a new disk was seen by the block layer or by ub_probe().
1718  * The main onjective here is to discover the features of the media such as
1719  * the capacity, read-only status, etc. USB storage generally does not
1720  * need to be spun up, but if we needed it, this would be the place.
1721  *
1722  * This call can sleep.
1723  *
1724  * The return code is not used.
1725  */
1726 static int ub_bd_revalidate(struct gendisk *disk)
1727 {
1728         struct ub_lun *lun = disk->private_data;
1729
1730         ub_revalidate(lun->udev, lun);
1731
1732         /* XXX Support sector size switching like in sr.c */
1733         blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
1734         set_capacity(disk, lun->capacity.nsec);
1735         // set_disk_ro(sdkp->disk, lun->readonly);
1736
1737         return 0;
1738 }
1739
1740 /*
1741  * The check is called by the block layer to verify if the media
1742  * is still available. It is supposed to be harmless, lightweight and
1743  * non-intrusive in case the media was not changed.
1744  *
1745  * This call can sleep.
1746  *
1747  * The return code is bool!
1748  */
1749 static int ub_bd_media_changed(struct gendisk *disk)
1750 {
1751         struct ub_lun *lun = disk->private_data;
1752
1753         if (!lun->removable)
1754                 return 0;
1755
1756         /*
1757          * We clean checks always after every command, so this is not
1758          * as dangerous as it looks. If the TEST_UNIT_READY fails here,
1759          * the device is actually not ready with operator or software
1760          * intervention required. One dangerous item might be a drive which
1761          * spins itself down, and come the time to write dirty pages, this
1762          * will fail, then block layer discards the data. Since we never
1763          * spin drives up, such devices simply cannot be used with ub anyway.
1764          */
1765         if (ub_sync_tur(lun->udev, lun) != 0) {
1766                 lun->changed = 1;
1767                 return 1;
1768         }
1769
1770         return lun->changed;
1771 }
1772
1773 static struct block_device_operations ub_bd_fops = {
1774         .owner          = THIS_MODULE,
1775         .open           = ub_bd_open,
1776         .release        = ub_bd_release,
1777         .ioctl          = ub_bd_ioctl,
1778         .media_changed  = ub_bd_media_changed,
1779         .revalidate_disk = ub_bd_revalidate,
1780 };
1781
1782 /*
1783  * Common ->done routine for commands executed synchronously.
1784  */
1785 static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1786 {
1787         struct completion *cop = cmd->back;
1788         complete(cop);
1789 }
1790
1791 /*
1792  * Test if the device has a check condition on it, synchronously.
1793  */
1794 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
1795 {
1796         struct ub_scsi_cmd *cmd;
1797         enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
1798         unsigned long flags;
1799         struct completion compl;
1800         int rc;
1801
1802         init_completion(&compl);
1803
1804         rc = -ENOMEM;
1805         if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1806                 goto err_alloc;
1807
1808         cmd->cdb[0] = TEST_UNIT_READY;
1809         cmd->cdb_len = 6;
1810         cmd->dir = UB_DIR_NONE;
1811         cmd->state = UB_CMDST_INIT;
1812         cmd->lun = lun;                 /* This may be NULL, but that's ok */
1813         cmd->done = ub_probe_done;
1814         cmd->back = &compl;
1815
1816         spin_lock_irqsave(sc->lock, flags);
1817         cmd->tag = sc->tagcnt++;
1818
1819         rc = ub_submit_scsi(sc, cmd);
1820         spin_unlock_irqrestore(sc->lock, flags);
1821
1822         if (rc != 0)
1823                 goto err_submit;
1824
1825         wait_for_completion(&compl);
1826
1827         rc = cmd->error;
1828
1829         if (rc == -EIO && cmd->key != 0)        /* Retries for benh's key */
1830                 rc = cmd->key;
1831
1832 err_submit:
1833         kfree(cmd);
1834 err_alloc:
1835         return rc;
1836 }
1837
1838 /*
1839  * Read the SCSI capacity synchronously (for probing).
1840  */
1841 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
1842     struct ub_capacity *ret)
1843 {
1844         struct ub_scsi_cmd *cmd;
1845         struct scatterlist *sg;
1846         char *p;
1847         enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
1848         unsigned long flags;
1849         unsigned int bsize, shift;
1850         unsigned long nsec;
1851         struct completion compl;
1852         int rc;
1853
1854         init_completion(&compl);
1855
1856         rc = -ENOMEM;
1857         if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1858                 goto err_alloc;
1859         p = (char *)cmd + sizeof(struct ub_scsi_cmd);
1860
1861         cmd->cdb[0] = 0x25;
1862         cmd->cdb_len = 10;
1863         cmd->dir = UB_DIR_READ;
1864         cmd->state = UB_CMDST_INIT;
1865         cmd->nsg = 1;
1866         sg = &cmd->sgv[0];
1867         sg_init_table(sg, UB_MAX_REQ_SG);
1868         sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1));
1869         cmd->len = 8;
1870         cmd->lun = lun;
1871         cmd->done = ub_probe_done;
1872         cmd->back = &compl;
1873
1874         spin_lock_irqsave(sc->lock, flags);
1875         cmd->tag = sc->tagcnt++;
1876
1877         rc = ub_submit_scsi(sc, cmd);
1878         spin_unlock_irqrestore(sc->lock, flags);
1879
1880         if (rc != 0)
1881                 goto err_submit;
1882
1883         wait_for_completion(&compl);
1884
1885         if (cmd->error != 0) {
1886                 rc = -EIO;
1887                 goto err_read;
1888         }
1889         if (cmd->act_len != 8) {
1890                 rc = -EIO;
1891                 goto err_read;
1892         }
1893
1894         /* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
1895         nsec = be32_to_cpu(*(__be32 *)p) + 1;
1896         bsize = be32_to_cpu(*(__be32 *)(p + 4));
1897         switch (bsize) {
1898         case 512:       shift = 0;      break;
1899         case 1024:      shift = 1;      break;
1900         case 2048:      shift = 2;      break;
1901         case 4096:      shift = 3;      break;
1902         default:
1903                 rc = -EDOM;
1904                 goto err_inv_bsize;
1905         }
1906
1907         ret->bsize = bsize;
1908         ret->bshift = shift;
1909         ret->nsec = nsec << shift;
1910         rc = 0;
1911
1912 err_inv_bsize:
1913 err_read:
1914 err_submit:
1915         kfree(cmd);
1916 err_alloc:
1917         return rc;
1918 }
1919
1920 /*
1921  */
1922 static void ub_probe_urb_complete(struct urb *urb)
1923 {
1924         struct completion *cop = urb->context;
1925         complete(cop);
1926 }
1927
1928 static void ub_probe_timeout(unsigned long arg)
1929 {
1930         struct completion *cop = (struct completion *) arg;
1931         complete(cop);
1932 }
1933
1934 /*
1935  * Reset with a Bulk reset.
1936  */
1937 static int ub_sync_reset(struct ub_dev *sc)
1938 {
1939         int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
1940         struct usb_ctrlrequest *cr;
1941         struct completion compl;
1942         struct timer_list timer;
1943         int rc;
1944
1945         init_completion(&compl);
1946
1947         cr = &sc->work_cr;
1948         cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
1949         cr->bRequest = US_BULK_RESET_REQUEST;
1950         cr->wValue = cpu_to_le16(0);
1951         cr->wIndex = cpu_to_le16(ifnum);
1952         cr->wLength = cpu_to_le16(0);
1953
1954         usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1955             (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
1956         sc->work_urb.actual_length = 0;
1957         sc->work_urb.error_count = 0;
1958         sc->work_urb.status = 0;
1959
1960         if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
1961                 printk(KERN_WARNING
1962                      "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
1963                 return rc;
1964         }
1965
1966         init_timer(&timer);
1967         timer.function = ub_probe_timeout;
1968         timer.data = (unsigned long) &compl;
1969         timer.expires = jiffies + UB_CTRL_TIMEOUT;
1970         add_timer(&timer);
1971
1972         wait_for_completion(&compl);
1973
1974         del_timer_sync(&timer);
1975         usb_kill_urb(&sc->work_urb);
1976
1977         return sc->work_urb.status;
1978 }
1979
1980 /*
1981  * Get number of LUNs by the way of Bulk GetMaxLUN command.
1982  */
1983 static int ub_sync_getmaxlun(struct ub_dev *sc)
1984 {
1985         int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
1986         unsigned char *p;
1987         enum { ALLOC_SIZE = 1 };
1988         struct usb_ctrlrequest *cr;
1989         struct completion compl;
1990         struct timer_list timer;
1991         int nluns;
1992         int rc;
1993
1994         init_completion(&compl);
1995
1996         rc = -ENOMEM;
1997         if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1998                 goto err_alloc;
1999         *p = 55;
2000
2001         cr = &sc->work_cr;
2002         cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
2003         cr->bRequest = US_BULK_GET_MAX_LUN;
2004         cr->wValue = cpu_to_le16(0);
2005         cr->wIndex = cpu_to_le16(ifnum);
2006         cr->wLength = cpu_to_le16(1);
2007
2008         usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
2009             (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
2010         sc->work_urb.actual_length = 0;
2011         sc->work_urb.error_count = 0;
2012         sc->work_urb.status = 0;
2013
2014         if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
2015                 goto err_submit;
2016
2017         init_timer(&timer);
2018         timer.function = ub_probe_timeout;
2019         timer.data = (unsigned long) &compl;
2020         timer.expires = jiffies + UB_CTRL_TIMEOUT;
2021         add_timer(&timer);
2022
2023         wait_for_completion(&compl);
2024
2025         del_timer_sync(&timer);
2026         usb_kill_urb(&sc->work_urb);
2027
2028         if ((rc = sc->work_urb.status) < 0)
2029                 goto err_io;
2030
2031         if (sc->work_urb.actual_length != 1) {
2032                 nluns = 0;
2033         } else {
2034                 if ((nluns = *p) == 55) {
2035                         nluns = 0;
2036                 } else {
2037                         /* GetMaxLUN returns the maximum LUN number */
2038                         nluns += 1;
2039                         if (nluns > UB_MAX_LUNS)
2040                                 nluns = UB_MAX_LUNS;
2041                 }
2042         }
2043
2044         kfree(p);
2045         return nluns;
2046
2047 err_io:
2048 err_submit:
2049         kfree(p);
2050 err_alloc:
2051         return rc;
2052 }
2053
2054 /*
2055  * Clear initial stalls.
2056  */
2057 static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
2058 {
2059         int endp;
2060         struct usb_ctrlrequest *cr;
2061         struct completion compl;
2062         struct timer_list timer;
2063         int rc;
2064
2065         init_completion(&compl);
2066
2067         endp = usb_pipeendpoint(stalled_pipe);
2068         if (usb_pipein (stalled_pipe))
2069                 endp |= USB_DIR_IN;
2070
2071         cr = &sc->work_cr;
2072         cr->bRequestType = USB_RECIP_ENDPOINT;
2073         cr->bRequest = USB_REQ_CLEAR_FEATURE;
2074         cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
2075         cr->wIndex = cpu_to_le16(endp);
2076         cr->wLength = cpu_to_le16(0);
2077
2078         usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
2079             (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
2080         sc->work_urb.actual_length = 0;
2081         sc->work_urb.error_count = 0;
2082         sc->work_urb.status = 0;
2083
2084         if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2085                 printk(KERN_WARNING
2086                      "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
2087                 return rc;
2088         }
2089
2090         init_timer(&timer);
2091         timer.function = ub_probe_timeout;
2092         timer.data = (unsigned long) &compl;
2093         timer.expires = jiffies + UB_CTRL_TIMEOUT;
2094         add_timer(&timer);
2095
2096         wait_for_completion(&compl);
2097
2098         del_timer_sync(&timer);
2099         usb_kill_urb(&sc->work_urb);
2100
2101         /* reset the endpoint toggle */
2102         usb_settoggle(sc->dev, endp, usb_pipeout(sc->last_pipe), 0);
2103
2104         return 0;
2105 }
2106
2107 /*
2108  * Get the pipe settings.
2109  */
2110 static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
2111     struct usb_interface *intf)
2112 {
2113         struct usb_host_interface *altsetting = intf->cur_altsetting;
2114         struct usb_endpoint_descriptor *ep_in = NULL;
2115         struct usb_endpoint_descriptor *ep_out = NULL;
2116         struct usb_endpoint_descriptor *ep;
2117         int i;
2118
2119         /*
2120          * Find the endpoints we need.
2121          * We are expecting a minimum of 2 endpoints - in and out (bulk).
2122          * We will ignore any others.
2123          */
2124         for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
2125                 ep = &altsetting->endpoint[i].desc;
2126
2127                 /* Is it a BULK endpoint? */
2128                 if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
2129                                 == USB_ENDPOINT_XFER_BULK) {
2130                         /* BULK in or out? */
2131                         if (ep->bEndpointAddress & USB_DIR_IN) {
2132                                 if (ep_in == NULL)
2133                                         ep_in = ep;
2134                         } else {
2135                                 if (ep_out == NULL)
2136                                         ep_out = ep;
2137                         }
2138                 }
2139         }
2140
2141         if (ep_in == NULL || ep_out == NULL) {
2142                 printk(KERN_NOTICE "%s: failed endpoint check\n",
2143                     sc->name);
2144                 return -ENODEV;
2145         }
2146
2147         /* Calculate and store the pipe values */
2148         sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
2149         sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
2150         sc->send_bulk_pipe = usb_sndbulkpipe(dev,
2151                 ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2152         sc->recv_bulk_pipe = usb_rcvbulkpipe(dev, 
2153                 ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2154
2155         return 0;
2156 }
2157
2158 /*
2159  * Probing is done in the process context, which allows us to cheat
2160  * and not to build a state machine for the discovery.
2161  */
2162 static int ub_probe(struct usb_interface *intf,
2163     const struct usb_device_id *dev_id)
2164 {
2165         struct ub_dev *sc;
2166         int nluns;
2167         int rc;
2168         int i;
2169
2170         if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
2171                 return -ENXIO;
2172
2173         rc = -ENOMEM;
2174         if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
2175                 goto err_core;
2176         sc->lock = ub_next_lock();
2177         INIT_LIST_HEAD(&sc->luns);
2178         usb_init_urb(&sc->work_urb);
2179         tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
2180         atomic_set(&sc->poison, 0);
2181         INIT_WORK(&sc->reset_work, ub_reset_task);
2182         init_waitqueue_head(&sc->reset_wait);
2183
2184         init_timer(&sc->work_timer);
2185         sc->work_timer.data = (unsigned long) sc;
2186         sc->work_timer.function = ub_urb_timeout;
2187
2188         ub_init_completion(&sc->work_done);
2189         sc->work_done.done = 1;         /* A little yuk, but oh well... */
2190
2191         sc->dev = interface_to_usbdev(intf);
2192         sc->intf = intf;
2193         // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
2194         usb_set_intfdata(intf, sc);
2195         usb_get_dev(sc->dev);
2196         /*
2197          * Since we give the interface struct to the block level through
2198          * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
2199          * oopses on close after a disconnect (kernels 2.6.16 and up).
2200          */
2201         usb_get_intf(sc->intf);
2202
2203         snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
2204             sc->dev->bus->busnum, sc->dev->devnum);
2205
2206         /* XXX Verify that we can handle the device (from descriptors) */
2207
2208         if (ub_get_pipes(sc, sc->dev, intf) != 0)
2209                 goto err_dev_desc;
2210
2211         /*
2212          * At this point, all USB initialization is done, do upper layer.
2213          * We really hate halfway initialized structures, so from the
2214          * invariants perspective, this ub_dev is fully constructed at
2215          * this point.
2216          */
2217
2218         /*
2219          * This is needed to clear toggles. It is a problem only if we do
2220          * `rmmod ub && modprobe ub` without disconnects, but we like that.
2221          */
2222 #if 0 /* iPod Mini fails if we do this (big white iPod works) */
2223         ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
2224         ub_probe_clear_stall(sc, sc->send_bulk_pipe);
2225 #endif
2226
2227         /*
2228          * The way this is used by the startup code is a little specific.
2229          * A SCSI check causes a USB stall. Our common case code sees it
2230          * and clears the check, after which the device is ready for use.
2231          * But if a check was not present, any command other than
2232          * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
2233          *
2234          * If we neglect to clear the SCSI check, the first real command fails
2235          * (which is the capacity readout). We clear that and retry, but why
2236          * causing spurious retries for no reason.
2237          *
2238          * Revalidation may start with its own TEST_UNIT_READY, but that one
2239          * has to succeed, so we clear checks with an additional one here.
2240          * In any case it's not our business how revaliadation is implemented.
2241          */
2242         for (i = 0; i < 3; i++) {  /* Retries for the schwag key from KS'04 */
2243                 if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
2244                 if (rc != 0x6) break;
2245                 msleep(10);
2246         }
2247
2248         nluns = 1;
2249         for (i = 0; i < 3; i++) {
2250                 if ((rc = ub_sync_getmaxlun(sc)) < 0)
2251                         break;
2252                 if (rc != 0) {
2253                         nluns = rc;
2254                         break;
2255                 }
2256                 msleep(100);
2257         }
2258
2259         for (i = 0; i < nluns; i++) {
2260                 ub_probe_lun(sc, i);
2261         }
2262         return 0;
2263
2264 err_dev_desc:
2265         usb_set_intfdata(intf, NULL);
2266         usb_put_intf(sc->intf);
2267         usb_put_dev(sc->dev);
2268         kfree(sc);
2269 err_core:
2270         return rc;
2271 }
2272
2273 static int ub_probe_lun(struct ub_dev *sc, int lnum)
2274 {
2275         struct ub_lun *lun;
2276         struct request_queue *q;
2277         struct gendisk *disk;
2278         int rc;
2279
2280         rc = -ENOMEM;
2281         if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
2282                 goto err_alloc;
2283         lun->num = lnum;
2284
2285         rc = -ENOSR;
2286         if ((lun->id = ub_id_get()) == -1)
2287                 goto err_id;
2288
2289         lun->udev = sc;
2290
2291         snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
2292             lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);
2293
2294         lun->removable = 1;             /* XXX Query this from the device */
2295         lun->changed = 1;               /* ub_revalidate clears only */
2296         ub_revalidate(sc, lun);
2297
2298         rc = -ENOMEM;
2299         if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
2300                 goto err_diskalloc;
2301
2302         sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
2303         disk->major = UB_MAJOR;
2304         disk->first_minor = lun->id * UB_PARTS_PER_LUN;
2305         disk->fops = &ub_bd_fops;
2306         disk->private_data = lun;
2307         disk->driverfs_dev = &sc->intf->dev;
2308
2309         rc = -ENOMEM;
2310         if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
2311                 goto err_blkqinit;
2312
2313         disk->queue = q;
2314
2315         blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
2316         blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
2317         blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
2318         blk_queue_segment_boundary(q, 0xffffffff);      /* Dubious. */
2319         blk_queue_max_sectors(q, UB_MAX_SECTORS);
2320         blk_queue_hardsect_size(q, lun->capacity.bsize);
2321
2322         lun->disk = disk;
2323         q->queuedata = lun;
2324         list_add(&lun->link, &sc->luns);
2325
2326         set_capacity(disk, lun->capacity.nsec);
2327         if (lun->removable)
2328                 disk->flags |= GENHD_FL_REMOVABLE;
2329
2330         add_disk(disk);
2331
2332         return 0;
2333
2334 err_blkqinit:
2335         put_disk(disk);
2336 err_diskalloc:
2337         ub_id_put(lun->id);
2338 err_id:
2339         kfree(lun);
2340 err_alloc:
2341         return rc;
2342 }
2343
2344 static void ub_disconnect(struct usb_interface *intf)
2345 {
2346         struct ub_dev *sc = usb_get_intfdata(intf);
2347         struct ub_lun *lun;
2348         unsigned long flags;
2349
2350         /*
2351          * Prevent ub_bd_release from pulling the rug from under us.
2352          * XXX This is starting to look like a kref.
2353          * XXX Why not to take this ref at probe time?
2354          */
2355         spin_lock_irqsave(&ub_lock, flags);
2356         sc->openc++;
2357         spin_unlock_irqrestore(&ub_lock, flags);
2358
2359         /*
2360          * Fence stall clearnings, operations triggered by unlinkings and so on.
2361          * We do not attempt to unlink any URBs, because we do not trust the
2362          * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway.
2363          */
2364         atomic_set(&sc->poison, 1);
2365
2366         /*
2367          * Wait for reset to end, if any.
2368          */
2369         wait_event(sc->reset_wait, !sc->reset);
2370
2371         /*
2372          * Blow away queued commands.
2373          *
2374          * Actually, this never works, because before we get here
2375          * the HCD terminates outstanding URB(s). It causes our
2376          * SCSI command queue to advance, commands fail to submit,
2377          * and the whole queue drains. So, we just use this code to
2378          * print warnings.
2379          */
2380         spin_lock_irqsave(sc->lock, flags);
2381         {
2382                 struct ub_scsi_cmd *cmd;
2383                 int cnt = 0;
2384                 while ((cmd = ub_cmdq_peek(sc)) != NULL) {
2385                         cmd->error = -ENOTCONN;
2386                         cmd->state = UB_CMDST_DONE;
2387                         ub_cmdq_pop(sc);
2388                         (*cmd->done)(sc, cmd);
2389                         cnt++;
2390                 }
2391                 if (cnt != 0) {
2392                         printk(KERN_WARNING "%s: "
2393                             "%d was queued after shutdown\n", sc->name, cnt);
2394                 }
2395         }
2396         spin_unlock_irqrestore(sc->lock, flags);
2397
2398         /*
2399          * Unregister the upper layer.
2400          */
2401         list_for_each_entry(lun, &sc->luns, link) {
2402                 del_gendisk(lun->disk);
2403                 /*
2404                  * I wish I could do:
2405                  *    set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
2406                  * As it is, we rely on our internal poisoning and let
2407                  * the upper levels to spin furiously failing all the I/O.
2408                  */
2409         }
2410
2411         /*
2412          * Testing for -EINPROGRESS is always a bug, so we are bending
2413          * the rules a little.
2414          */
2415         spin_lock_irqsave(sc->lock, flags);
2416         if (sc->work_urb.status == -EINPROGRESS) {      /* janitors: ignore */
2417                 printk(KERN_WARNING "%s: "
2418                     "URB is active after disconnect\n", sc->name);
2419         }
2420         spin_unlock_irqrestore(sc->lock, flags);
2421
2422         /*
2423          * There is virtually no chance that other CPU runs times so long
2424          * after ub_urb_complete should have called del_timer, but only if HCD
2425          * didn't forget to deliver a callback on unlink.
2426          */
2427         del_timer_sync(&sc->work_timer);
2428
2429         /*
2430          * At this point there must be no commands coming from anyone
2431          * and no URBs left in transit.
2432          */
2433
2434         ub_put(sc);
2435 }
2436
2437 static struct usb_driver ub_driver = {
2438         .name =         "ub",
2439         .probe =        ub_probe,
2440         .disconnect =   ub_disconnect,
2441         .id_table =     ub_usb_ids,
2442 };
2443
2444 static int __init ub_init(void)
2445 {
2446         int rc;
2447         int i;
2448
2449         for (i = 0; i < UB_QLOCK_NUM; i++)
2450                 spin_lock_init(&ub_qlockv[i]);
2451
2452         if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
2453                 goto err_regblkdev;
2454
2455         if ((rc = usb_register(&ub_driver)) != 0)
2456                 goto err_register;
2457
2458         usb_usual_set_present(USB_US_TYPE_UB);
2459         return 0;
2460
2461 err_register:
2462         unregister_blkdev(UB_MAJOR, DRV_NAME);
2463 err_regblkdev:
2464         return rc;
2465 }
2466
2467 static void __exit ub_exit(void)
2468 {
2469         usb_deregister(&ub_driver);
2470
2471         unregister_blkdev(UB_MAJOR, DRV_NAME);
2472         usb_usual_clear_present(USB_US_TYPE_UB);
2473 }
2474
2475 module_init(ub_init);
2476 module_exit(ub_exit);
2477
2478 MODULE_LICENSE("GPL");