Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * The low performance USB storage driver (ub). | |
3 | * | |
4 | * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net) | |
5 | * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com) | |
6 | * | |
7 | * This work is a part of Linux kernel, is derived from it, | |
8 | * and is not licensed separately. See file COPYING for details. | |
9 | * | |
10 | * TODO (sorted by decreasing priority) | |
1da177e4 LT |
11 | * -- set readonly flag for CDs, set removable flag for CF readers |
12 | * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch) | |
1da177e4 | 13 | * -- verify the 13 conditions and do bulk resets |
ba6abf13 | 14 | * -- highmem |
1da177e4 LT |
15 | * -- move top_sense and work_bcs into separate allocations (if they survive) |
16 | * for cache purists and esoteric architectures. | |
ba6abf13 | 17 | * -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ? |
1da177e4 | 18 | * -- prune comments, they are too voluminous
1da177e4 | 19 | * -- Resolve XXX's
1872bceb | 20 | * -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring. |
1da177e4 LT |
21 | */ |
22 | #include <linux/kernel.h> | |
23 | #include <linux/module.h> | |
24 | #include <linux/usb.h> | |
a00828e9 | 25 | #include <linux/usb_usual.h> |
1da177e4 | 26 | #include <linux/blkdev.h> |
1da177e4 | 27 | #include <linux/timer.h> |
45711f1a | 28 | #include <linux/scatterlist.h> |
1da177e4 LT |
29 | #include <scsi/scsi.h> |
30 | ||
31 | #define DRV_NAME "ub" | |
1da177e4 LT |
32 | |
33 | #define UB_MAJOR 180 | |
34 | ||
1872bceb PZ |
35 | /* |
36 | * The command state machine is the key model for understanding this driver. | |
37 | * | |
38 | * The general rule is that all transitions are done towards the bottom | |
39 | * of the diagram, thus preventing any loops. | |
40 | * | |
41 | * An exception to that is how the STAT state is handled. A counter allows it | |
42 | * to be re-entered along the path marked with [C]. | |
43 | * | |
44 | * +--------+ | |
45 | * ! INIT ! | |
46 | * +--------+ | |
47 | * ! | |
48 | * ub_scsi_cmd_start fails ->--------------------------------------\ | |
49 | * ! ! | |
50 | * V ! | |
51 | * +--------+ ! | |
52 | * ! CMD ! ! | |
53 | * +--------+ ! | |
54 | * ! +--------+ ! | |
55 | * was -EPIPE -->-------------------------------->! CLEAR ! ! | |
56 | * ! +--------+ ! | |
57 | * ! ! ! | |
58 | * was error -->------------------------------------- ! --------->\ | |
59 | * ! ! ! | |
60 | * /--<-- cmd->dir == NONE ? ! ! | |
61 | * ! ! ! ! | |
62 | * ! V ! ! | |
63 | * ! +--------+ ! ! | |
64 | * ! ! DATA ! ! ! | |
65 | * ! +--------+ ! ! | |
66 | * ! ! +---------+ ! ! | |
67 | * ! was -EPIPE -->--------------->! CLR2STS ! ! ! | |
68 | * ! ! +---------+ ! ! | |
69 | * ! ! ! ! ! | |
70 | * ! ! was error -->---- ! --------->\ | |
71 | * ! was error -->--------------------- ! ------------- ! --------->\ | |
72 | * ! ! ! ! ! | |
73 | * ! V ! ! ! | |
74 | * \--->+--------+ ! ! ! | |
75 | * ! STAT !<--------------------------/ ! ! | |
76 | * /--->+--------+ ! ! | |
77 | * ! ! ! ! | |
78 | * [C] was -EPIPE -->-----------\ ! ! | |
79 | * ! ! ! ! ! | |
80 | * +<---- len == 0 ! ! ! | |
81 | * ! ! ! ! ! | |
82 | * ! was error -->--------------------------------------!---------->\ | |
83 | * ! ! ! ! ! | |
84 | * +<---- bad CSW ! ! ! | |
85 | * +<---- bad tag ! ! ! | |
86 | * ! ! V ! ! | |
87 | * ! ! +--------+ ! ! | |
88 | * ! ! ! CLRRS ! ! ! | |
89 | * ! ! +--------+ ! ! | |
90 | * ! ! ! ! ! | |
91 | * \------- ! --------------------[C]--------\ ! ! | |
92 | * ! ! ! ! | |
93 | * cmd->error---\ +--------+ ! ! | |
94 | * ! +--------------->! SENSE !<----------/ ! | |
95 | * STAT_FAIL----/ +--------+ ! | |
96 | * ! ! V | |
97 | * ! V +--------+ | |
98 | * \--------------------------------\--------------------->! DONE ! | |
99 | * +--------+ | |
100 | */ | |
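/*
 * An illustrative condensation of the bounded [C] re-entry (a sketch,
 * not new driver logic; it mirrors ub_state_stat_counted() further down
 * in this file). The counter limits how many times STAT may be
 * re-entered before the command falls through to SENSE:
 *
 *	if (++cmd->stat_count >= 4) {
 *		ub_state_sense(sc, cmd);	// give up, request sense
 *		return;
 *	}
 *	if (__ub_state_stat(sc, cmd) != 0)	// re-submit the CSW read
 *		return;
 *	cmd->state = UB_CMDST_STAT;		// bounded re-entry into STAT
 */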
101 | ||
1da177e4 | 102 | /* |
f4800078 PZ |
103 | * This many LUNs per USB device. |
104 | * Every one of them takes a host, see UB_MAX_HOSTS. | |
1da177e4 | 105 | */ |
9f793d2c | 106 | #define UB_MAX_LUNS 9 |
f4800078 PZ |
107 | |
108 | /* | |
109 | */ | |
110 | ||
4fb729f5 | 111 | #define UB_PARTS_PER_LUN 8 |
1da177e4 LT |
112 | |
113 | #define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */ | |
114 | ||
115 | #define UB_SENSE_SIZE 18 | |
116 | ||
117 | /* | |
118 | */ | |
119 | ||
120 | /* command block wrapper */ | |
121 | struct bulk_cb_wrap { | |
122 | __le32 Signature; /* contains 'USBC' */ | |
123 | u32 Tag; /* unique per command id */ | |
124 | __le32 DataTransferLength; /* size of data */ | |
125 | u8 Flags; /* direction in bit 7 (0x80 == IN) */ |
f4800078 | 126 | u8 Lun; /* LUN */ |
1da177e4 LT |
127 | u8 Length; /* length of the CDB */ |
128 | u8 CDB[UB_MAX_CDB_SIZE]; /* max command */ | |
129 | }; | |
130 | ||
131 | #define US_BULK_CB_WRAP_LEN 31 | |
132 | #define US_BULK_CB_SIGN 0x43425355 /* spells out 'USBC' */ |
133 | #define US_BULK_FLAG_IN 1 | |
134 | #define US_BULK_FLAG_OUT 0 | |
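/*
 * An illustrative sketch (assumed values, not from the original source)
 * of a CBW filled for a 4096-byte read; it mirrors what
 * ub_scsi_cmd_start() does later in this file:
 *
 *	struct bulk_cb_wrap bcb;
 *	bcb.Signature = cpu_to_le32(US_BULK_CB_SIGN);
 *	bcb.Tag = 42;				// echoed back in the CSW
 *	bcb.DataTransferLength = cpu_to_le32(4096);
 *	bcb.Flags = 0x80;			// bit 7 set: device-to-host (IN)
 *	bcb.Lun = 0;
 *	bcb.Length = 10;			// a READ(10) CDB is 10 bytes
 *	memcpy(bcb.CDB, cdb, UB_MAX_CDB_SIZE);	// cdb is the caller's CDB
 */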
135 | ||
136 | /* command status wrapper */ | |
137 | struct bulk_cs_wrap { | |
138 | __le32 Signature; /* should = 'USBS' */ | |
139 | u32 Tag; /* same as original command */ | |
140 | __le32 Residue; /* amount not transferred */ | |
141 | u8 Status; /* see below */ | |
142 | }; | |
143 | ||
144 | #define US_BULK_CS_WRAP_LEN 13 | |
145 | #define US_BULK_CS_SIGN 0x53425355 /* spells out 'USBS' */ | |
1da177e4 LT |
146 | #define US_BULK_STAT_OK 0 |
147 | #define US_BULK_STAT_FAIL 1 | |
148 | #define US_BULK_STAT_PHASE 2 | |
149 | ||
150 | /* bulk-only class specific requests */ | |
151 | #define US_BULK_RESET_REQUEST 0xff | |
152 | #define US_BULK_GET_MAX_LUN 0xfe | |
153 | ||
154 | /* | |
155 | */ | |
156 | struct ub_dev; | |
157 | ||
64bd8453 | 158 | #define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */ |
1da177e4 LT |
159 | #define UB_MAX_SECTORS 64 |
160 | ||
161 | /* | |
162 | * A second is more than enough for a 32K transfer (UB_MAX_SECTORS) | |
163 | * even if a webcam hogs the bus, but some devices need time to spin up. | |
164 | */ | |
165 | #define UB_URB_TIMEOUT (HZ*2) | |
166 | #define UB_DATA_TIMEOUT (HZ*5) /* ZIP does spin-ups in the data phase */ | |
167 | #define UB_STAT_TIMEOUT (HZ*5) /* Same spinups and eject for a dataless cmd. */ | |
168 | #define UB_CTRL_TIMEOUT (HZ/2) /* 500ms ought to be enough to clear a stall */ | |
169 | ||
170 | /* | |
171 | * An instance of a SCSI command in transit. | |
172 | */ | |
173 | #define UB_DIR_NONE 0 | |
174 | #define UB_DIR_READ 1 | |
175 | #define UB_DIR_ILLEGAL2 2 | |
176 | #define UB_DIR_WRITE 3 | |
177 | ||
178 | #define UB_DIR_CHAR(c) (((c)==UB_DIR_WRITE)? 'w': \ | |
179 | (((c)==UB_DIR_READ)? 'r': 'n')) | |
180 | ||
181 | enum ub_scsi_cmd_state { | |
182 | UB_CMDST_INIT, /* Initial state */ | |
183 | UB_CMDST_CMD, /* Command submitted */ | |
184 | UB_CMDST_DATA, /* Data phase */ | |
185 | UB_CMDST_CLR2STS, /* Clearing before requesting status */ | |
186 | UB_CMDST_STAT, /* Status phase */ | |
187 | UB_CMDST_CLEAR, /* Clearing a stall (halt, actually) */ | |
1872bceb | 188 | UB_CMDST_CLRRS, /* Clearing before retrying status */ |
1da177e4 LT |
189 | UB_CMDST_SENSE, /* Sending Request Sense */ |
190 | UB_CMDST_DONE /* Final state */ | |
191 | }; | |
192 | ||
1da177e4 LT |
193 | struct ub_scsi_cmd { |
194 | unsigned char cdb[UB_MAX_CDB_SIZE]; | |
195 | unsigned char cdb_len; | |
196 | ||
197 | unsigned char dir; /* 0 - none, 1 - read, 3 - write. */ | |
1da177e4 LT |
198 | enum ub_scsi_cmd_state state; |
199 | unsigned int tag; | |
200 | struct ub_scsi_cmd *next; | |
201 | ||
202 | int error; /* Return code - valid upon done */ | |
203 | unsigned int act_len; /* Return size */ | |
204 | unsigned char key, asc, ascq; /* May be valid if error==-EIO */ | |
205 | ||
206 | int stat_count; /* Retries getting status. */ | |
207 | ||
1da177e4 | 208 | unsigned int len; /* Requested length */ |
a1cf96ef PZ |
209 | unsigned int current_sg; |
210 | unsigned int nsg; /* sgv[nsg] */ | |
211 | struct scatterlist sgv[UB_MAX_REQ_SG]; | |
1da177e4 | 212 | |
f4800078 | 213 | struct ub_lun *lun; |
1da177e4 LT |
214 | void (*done)(struct ub_dev *, struct ub_scsi_cmd *); |
215 | void *back; | |
216 | }; | |
217 | ||
2c26c9e6 PZ |
218 | struct ub_request { |
219 | struct request *rq; | |
220 | unsigned int current_try; | |
221 | unsigned int nsg; /* sgv[nsg] */ | |
222 | struct scatterlist sgv[UB_MAX_REQ_SG]; | |
223 | }; | |
224 | ||
1da177e4 LT |
225 | /* |
226 | */ | |
227 | struct ub_capacity { | |
228 | unsigned long nsec; /* Linux size - 512 byte sectors */ | |
229 | unsigned int bsize; /* Linux hardsect_size */ | |
230 | unsigned int bshift; /* Shift between 512 and hard sects */ | |
231 | }; | |
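/*
 * A worked example with assumed values (illustration only): a CD-ROM
 * style LUN reporting 2048-byte hard sectors and 65536 device blocks.
 *
 *	cap.bsize = 2048;		// hardsect size from READ CAPACITY
 *	cap.bshift = 2;			// 2048 == 512 << 2
 *	cap.nsec = 65536 << 2;		// device blocks -> 512-byte sectors
 */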
232 | ||
1da177e4 LT |
233 | /* |
234 | * This is a direct take-off from linux/include/completion.h | |
235 | * The difference is that I do not wait on this thing, just poll. | |
236 | * When I want to wait (ub_probe), I just use the stock completion. | |
237 | * | |
238 | * Note that INIT_COMPLETION takes no lock. It is correct. But why | |
239 | * in the bloody hell that thing takes struct instead of pointer to struct | |
240 | * is quite beyond me. I just copied it from the stock completion. | |
241 | */ | |
242 | struct ub_completion { | |
243 | unsigned int done; | |
244 | spinlock_t lock; | |
245 | }; | |
246 | ||
247 | static inline void ub_init_completion(struct ub_completion *x) | |
248 | { | |
249 | x->done = 0; | |
250 | spin_lock_init(&x->lock); | |
251 | } | |
252 | ||
253 | #define UB_INIT_COMPLETION(x) ((x).done = 0) | |
254 | ||
255 | static void ub_complete(struct ub_completion *x) | |
256 | { | |
257 | unsigned long flags; | |
258 | ||
259 | spin_lock_irqsave(&x->lock, flags); | |
260 | x->done++; | |
261 | spin_unlock_irqrestore(&x->lock, flags); | |
262 | } | |
263 | ||
264 | static int ub_is_completed(struct ub_completion *x) | |
265 | { | |
266 | unsigned long flags; | |
267 | int ret; | |
268 | ||
269 | spin_lock_irqsave(&x->lock, flags); | |
270 | ret = x->done; | |
271 | spin_unlock_irqrestore(&x->lock, flags); | |
272 | return ret; | |
273 | } | |
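/*
 * A minimal usage sketch of the poll-only discipline described above
 * (illustration only; this mirrors how work_done is used throughout
 * this file):
 *
 *	UB_INIT_COMPLETION(sc->work_done);	// arm before submitting
 *	usb_submit_urb(&sc->work_urb, GFP_ATOMIC);
 *	...
 *	if (ub_is_completed(&sc->work_done))	// poll, never sleep
 *		...;				// sc->work_urb.status is valid
 */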
274 | ||
275 | /* | |
276 | */ | |
277 | struct ub_scsi_cmd_queue { | |
278 | int qlen, qmax; | |
279 | struct ub_scsi_cmd *head, *tail; | |
280 | }; | |
281 | ||
282 | /* | |
f4800078 PZ |
283 | * The block device instance (one per LUN). |
284 | */ | |
285 | struct ub_lun { | |
286 | struct ub_dev *udev; | |
287 | struct list_head link; | |
288 | struct gendisk *disk; | |
289 | int id; /* Host index */ | |
290 | int num; /* LUN number */ | |
291 | char name[16]; | |
292 | ||
293 | int changed; /* Media was changed */ | |
294 | int removable; | |
295 | int readonly; | |
f4800078 | 296 | |
2c26c9e6 PZ |
297 | struct ub_request urq; |
298 | ||
f4800078 PZ |
299 | /* Use Ingo's mempool if or when we have more than one command. */ |
300 | /* | |
301 | * Currently we never need more than one command for the whole device. | |
302 | * However, giving every LUN a command is a cheap and automatic way | |
303 | * to enforce fairness between them. | |
304 | */ | |
305 | int cmda[1]; | |
306 | struct ub_scsi_cmd cmdv[1]; | |
307 | ||
308 | struct ub_capacity capacity; | |
309 | }; | |
310 | ||
311 | /* | |
312 | * The USB device instance. | |
1da177e4 LT |
313 | */ |
314 | struct ub_dev { | |
65b4fe55 | 315 | spinlock_t *lock; |
1da177e4 LT |
316 | atomic_t poison; /* The USB device is disconnected */ |
317 | int openc; /* protected by ub_lock! */ | |
318 | /* kref is too implicit for our taste */ | |
2c26c9e6 | 319 | int reset; /* Reset is running */ |
1da177e4 | 320 | unsigned int tagcnt; |
f4800078 | 321 | char name[12]; |
1da177e4 LT |
322 | struct usb_device *dev; |
323 | struct usb_interface *intf; | |
324 | ||
f4800078 | 325 | struct list_head luns; |
1da177e4 LT |
326 | |
327 | unsigned int send_bulk_pipe; /* cached pipe values */ | |
328 | unsigned int recv_bulk_pipe; | |
329 | unsigned int send_ctrl_pipe; | |
330 | unsigned int recv_ctrl_pipe; | |
331 | ||
332 | struct tasklet_struct tasklet; | |
333 | ||
1da177e4 LT |
334 | struct ub_scsi_cmd_queue cmd_queue; |
335 | struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */ | |
336 | unsigned char top_sense[UB_SENSE_SIZE]; | |
337 | ||
338 | struct ub_completion work_done; | |
339 | struct urb work_urb; | |
340 | struct timer_list work_timer; | |
341 | int last_pipe; /* What might need clearing */ | |
1872bceb | 342 | __le32 signature; /* Learned signature */ |
1da177e4 LT |
343 | struct bulk_cb_wrap work_bcb; |
344 | struct bulk_cs_wrap work_bcs; | |
345 | struct usb_ctrlrequest work_cr; | |
346 | ||
2c26c9e6 PZ |
347 | struct work_struct reset_work; |
348 | wait_queue_head_t reset_wait; | |
349 | ||
64bd8453 | 350 | int sg_stat[6]; |
1da177e4 LT |
351 | }; |
352 | ||
353 | /* | |
354 | */ | |
355 | static void ub_cleanup(struct ub_dev *sc); | |
6c1eb8c1 | 356 | static int ub_request_fn_1(struct ub_lun *lun, struct request *rq); |
2c26c9e6 PZ |
357 | static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, |
358 | struct ub_scsi_cmd *cmd, struct ub_request *urq); | |
359 | static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, | |
360 | struct ub_scsi_cmd *cmd, struct ub_request *urq); | |
1da177e4 | 361 | static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); |
d1ad4ea3 | 362 | static void ub_end_rq(struct request *rq, unsigned int status); |
2c26c9e6 PZ |
363 | static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, |
364 | struct ub_request *urq, struct ub_scsi_cmd *cmd); | |
1da177e4 | 365 | static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd); |
7d12e780 | 366 | static void ub_urb_complete(struct urb *urb); |
1da177e4 LT |
367 | static void ub_scsi_action(unsigned long _dev); |
368 | static void ub_scsi_dispatch(struct ub_dev *sc); | |
369 | static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd); | |
a1cf96ef | 370 | static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd); |
1da177e4 | 371 | static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc); |
1872bceb | 372 | static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); |
1da177e4 | 373 | static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); |
1872bceb | 374 | static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd); |
1da177e4 LT |
375 | static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd); |
376 | static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, | |
377 | int stalled_pipe); | |
378 | static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); | |
2c2e4a2e | 379 | static void ub_reset_enter(struct ub_dev *sc, int try); |
c4028958 | 380 | static void ub_reset_task(struct work_struct *work); |
f4800078 PZ |
381 | static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun); |
382 | static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, | |
383 | struct ub_capacity *ret); | |
2c2e4a2e PZ |
384 | static int ub_sync_reset(struct ub_dev *sc); |
385 | static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe); | |
f4800078 | 386 | static int ub_probe_lun(struct ub_dev *sc, int lnum); |
1da177e4 LT |
387 | |
388 | /* | |
389 | */ | |
a00828e9 PZ |
390 | #ifdef CONFIG_USB_LIBUSUAL |
391 | ||
392 | #define ub_usb_ids storage_usb_ids | |
393 | #else | |
394 | ||
1da177e4 | 395 | static struct usb_device_id ub_usb_ids[] = { |
1da177e4 LT |
396 | { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) }, |
397 | { } | |
398 | }; | |
399 | ||
400 | MODULE_DEVICE_TABLE(usb, ub_usb_ids); | |
a00828e9 | 401 | #endif /* CONFIG_USB_LIBUSUAL */ |
1da177e4 LT |
402 | |
403 | /* | |
404 | * Find me a way to identify "next free minor" for add_disk(), | |
405 | * and the array disappears the next day. However, the number of | |
406 | * hosts has something to do with the naming and /proc/partitions. | |
407 | * This has to be thought out in detail before changing. | |
408 | * If UB_MAX_HOST was 1000, we'd use a bitmap. Or a better data structure. | |
409 | */ | |
410 | #define UB_MAX_HOSTS 26 | |
411 | static char ub_hostv[UB_MAX_HOSTS]; | |
f4800078 | 412 | |
65b4fe55 PZ |
413 | #define UB_QLOCK_NUM 5 |
414 | static spinlock_t ub_qlockv[UB_QLOCK_NUM]; | |
415 | static int ub_qlock_next = 0; | |
416 | ||
1da177e4 LT |
417 | static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */ |
418 | ||
1da177e4 LT |
419 | /* |
420 | * The id allocator. | |
421 | * | |
422 | * This also stores the host for indexing by minor, which is somewhat dirty. | |
423 | */ | |
424 | static int ub_id_get(void) | |
425 | { | |
426 | unsigned long flags; | |
427 | int i; | |
428 | ||
429 | spin_lock_irqsave(&ub_lock, flags); | |
430 | for (i = 0; i < UB_MAX_HOSTS; i++) { | |
431 | if (ub_hostv[i] == 0) { | |
432 | ub_hostv[i] = 1; | |
433 | spin_unlock_irqrestore(&ub_lock, flags); | |
434 | return i; | |
435 | } | |
436 | } | |
437 | spin_unlock_irqrestore(&ub_lock, flags); | |
438 | return -1; | |
439 | } | |
440 | ||
441 | static void ub_id_put(int id) | |
442 | { | |
443 | unsigned long flags; | |
444 | ||
445 | if (id < 0 || id >= UB_MAX_HOSTS) { | |
446 | printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id); | |
447 | return; | |
448 | } | |
449 | ||
450 | spin_lock_irqsave(&ub_lock, flags); | |
451 | if (ub_hostv[id] == 0) { | |
452 | spin_unlock_irqrestore(&ub_lock, flags); | |
453 | printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id); | |
454 | return; | |
455 | } | |
456 | ub_hostv[id] = 0; | |
457 | spin_unlock_irqrestore(&ub_lock, flags); | |
458 | } | |
459 | ||
65b4fe55 PZ |
460 | /* |
461 | * This is necessitated by the fact that blk_cleanup_queue does not | |
462 | * necessarily destroy the queue. Instead, it may merely decrease q->refcnt. |
463 | * Since our blk_init_queue() passes a spinlock common with ub_dev, | |
464 | * we have life time issues when ub_cleanup frees ub_dev. | |
465 | */ | |
466 | static spinlock_t *ub_next_lock(void) | |
467 | { | |
468 | unsigned long flags; | |
469 | spinlock_t *ret; | |
470 | ||
471 | spin_lock_irqsave(&ub_lock, flags); | |
472 | ret = &ub_qlockv[ub_qlock_next]; | |
473 | ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM; | |
474 | spin_unlock_irqrestore(&ub_lock, flags); | |
475 | return ret; | |
476 | } | |
477 | ||
1da177e4 LT |
478 | /* |
479 | * Downcount for deallocation. This rides on two assumptions: | |
480 | * - once something is poisoned, its refcount cannot grow | |
481 | * - opens cannot happen at this time (del_gendisk was done) | |
482 | * If the above is true, we can drop the lock, which we need for | |
483 | * blk_cleanup_queue(): the silly thing may attempt to sleep. | |
484 | * [Actually, it never needs to sleep for us, but it calls might_sleep()] | |
485 | */ | |
486 | static void ub_put(struct ub_dev *sc) | |
487 | { | |
488 | unsigned long flags; | |
489 | ||
490 | spin_lock_irqsave(&ub_lock, flags); | |
491 | --sc->openc; | |
492 | if (sc->openc == 0 && atomic_read(&sc->poison)) { | |
493 | spin_unlock_irqrestore(&ub_lock, flags); | |
494 | ub_cleanup(sc); | |
495 | } else { | |
496 | spin_unlock_irqrestore(&ub_lock, flags); | |
497 | } | |
498 | } | |
499 | ||
500 | /* | |
501 | * Final cleanup and deallocation. | |
502 | */ | |
503 | static void ub_cleanup(struct ub_dev *sc) | |
504 | { | |
f4800078 PZ |
505 | struct list_head *p; |
506 | struct ub_lun *lun; | |
165125e1 | 507 | struct request_queue *q; |
1da177e4 | 508 | |
f4800078 PZ |
509 | while (!list_empty(&sc->luns)) { |
510 | p = sc->luns.next; | |
511 | lun = list_entry(p, struct ub_lun, link); | |
512 | list_del(p); | |
1da177e4 | 513 | |
f4800078 PZ |
514 | /* I don't think queue can be NULL. But... Stolen from sx8.c */ |
515 | if ((q = lun->disk->queue) != NULL) | |
516 | blk_cleanup_queue(q); | |
517 | /* | |
518 | * If we zero disk->private_data BEFORE put_disk, we have | |
519 | * to check for NULL all over the place in open, release, | |
520 | * check_media and revalidate, because the block level | |
521 | * semaphore is well inside the put_disk. | |
522 | * But we cannot zero after the call, because *disk is gone. | |
523 | * The sd.c is blatantly racy in this area. | |
524 | */ | |
525 | /* disk->private_data = NULL; */ | |
526 | put_disk(lun->disk); | |
527 | lun->disk = NULL; | |
528 | ||
529 | ub_id_put(lun->id); | |
530 | kfree(lun); | |
531 | } | |
1da177e4 | 532 | |
77ef6c4d PZ |
533 | usb_set_intfdata(sc->intf, NULL); |
534 | usb_put_intf(sc->intf); | |
535 | usb_put_dev(sc->dev); | |
1da177e4 LT |
536 | kfree(sc); |
537 | } | |
538 | ||
539 | /* | |
540 | * The "command allocator". | |
541 | */ | |
f4800078 | 542 | static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun) |
1da177e4 LT |
543 | { |
544 | struct ub_scsi_cmd *ret; | |
545 | ||
f4800078 | 546 | if (lun->cmda[0]) |
1da177e4 | 547 | return NULL; |
f4800078 PZ |
548 | ret = &lun->cmdv[0]; |
549 | lun->cmda[0] = 1; | |
1da177e4 LT |
550 | return ret; |
551 | } | |
552 | ||
f4800078 | 553 | static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd) |
1da177e4 | 554 | { |
f4800078 | 555 | if (cmd != &lun->cmdv[0]) { |
1da177e4 | 556 | printk(KERN_WARNING "%s: releasing a foreign cmd %p\n", |
f4800078 | 557 | lun->name, cmd); |
1da177e4 LT |
558 | return; |
559 | } | |
f4800078 PZ |
560 | if (!lun->cmda[0]) { |
561 | printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name); | |
1da177e4 LT |
562 | return; |
563 | } | |
f4800078 | 564 | lun->cmda[0] = 0; |
1da177e4 LT |
565 | } |
566 | ||
567 | /* | |
568 | * The command queue. | |
569 | */ | |
570 | static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |
571 | { | |
572 | struct ub_scsi_cmd_queue *t = &sc->cmd_queue; | |
573 | ||
574 | if (t->qlen++ == 0) { | |
575 | t->head = cmd; | |
576 | t->tail = cmd; | |
577 | } else { | |
578 | t->tail->next = cmd; | |
579 | t->tail = cmd; | |
580 | } | |
581 | ||
582 | if (t->qlen > t->qmax) | |
583 | t->qmax = t->qlen; | |
584 | } | |
585 | ||
586 | static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |
587 | { | |
588 | struct ub_scsi_cmd_queue *t = &sc->cmd_queue; | |
589 | ||
590 | if (t->qlen++ == 0) { | |
591 | t->head = cmd; | |
592 | t->tail = cmd; | |
593 | } else { | |
594 | cmd->next = t->head; | |
595 | t->head = cmd; | |
596 | } | |
597 | ||
598 | if (t->qlen > t->qmax) | |
599 | t->qmax = t->qlen; | |
600 | } | |
601 | ||
602 | static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc) | |
603 | { | |
604 | struct ub_scsi_cmd_queue *t = &sc->cmd_queue; | |
605 | struct ub_scsi_cmd *cmd; | |
606 | ||
607 | if (t->qlen == 0) | |
608 | return NULL; | |
609 | if (--t->qlen == 0) | |
610 | t->tail = NULL; | |
611 | cmd = t->head; | |
612 | t->head = cmd->next; | |
613 | cmd->next = NULL; | |
614 | return cmd; | |
615 | } | |
616 | ||
617 | #define ub_cmdq_peek(sc) ((sc)->cmd_queue.head) | |
618 | ||
619 | /* | |
620 | * The request function is our main entry point. |
621 | */ | |
622 | ||
165125e1 | 623 | static void ub_request_fn(struct request_queue *q) |
1da177e4 | 624 | { |
f4800078 | 625 | struct ub_lun *lun = q->queuedata; |
1da177e4 LT |
626 | struct request *rq; |
627 | ||
628 | while ((rq = elv_next_request(q)) != NULL) { | |
6c1eb8c1 | 629 | if (ub_request_fn_1(lun, rq) != 0) { |
1da177e4 LT |
630 | blk_stop_queue(q); |
631 | break; | |
632 | } | |
633 | } | |
634 | } | |
635 | ||
6c1eb8c1 | 636 | static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) |
1da177e4 | 637 | { |
f4800078 | 638 | struct ub_dev *sc = lun->udev; |
1da177e4 | 639 | struct ub_scsi_cmd *cmd; |
2c26c9e6 PZ |
640 | struct ub_request *urq; |
641 | int n_elem; | |
1da177e4 | 642 | |
d1ad4ea3 | 643 | if (atomic_read(&sc->poison)) { |
1da177e4 | 644 | blkdev_dequeue_request(rq); |
d1ad4ea3 PZ |
645 | ub_end_rq(rq, DID_NO_CONNECT << 16); |
646 | return 0; | |
647 | } | |
648 | ||
649 | if (lun->changed && !blk_pc_request(rq)) { | |
650 | blkdev_dequeue_request(rq); | |
651 | ub_end_rq(rq, SAM_STAT_CHECK_CONDITION); | |
1da177e4 LT |
652 | return 0; |
653 | } | |
654 | ||
2c26c9e6 PZ |
655 | if (lun->urq.rq != NULL) |
656 | return -1; | |
f4800078 | 657 | if ((cmd = ub_get_cmd(lun)) == NULL) |
1da177e4 LT |
658 | return -1; |
659 | memset(cmd, 0, sizeof(struct ub_scsi_cmd)); | |
45711f1a | 660 | sg_init_table(cmd->sgv, UB_MAX_REQ_SG); |
1da177e4 LT |
661 | |
662 | blkdev_dequeue_request(rq); | |
2c26c9e6 PZ |
663 | |
664 | urq = &lun->urq; | |
665 | memset(urq, 0, sizeof(struct ub_request)); | |
666 | urq->rq = rq; | |
667 | ||
668 | /* | |
669 | * get scatterlist from block layer | |
670 | */ | |
671 | n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]); | |
672 | if (n_elem < 0) { | |
b5600339 | 673 | /* Impossible, because blk_rq_map_sg should not hit ENOMEM. */ |
2c26c9e6 | 674 | printk(KERN_INFO "%s: failed request map (%d)\n", |
b5600339 | 675 | lun->name, n_elem); |
2c26c9e6 PZ |
676 | goto drop; |
677 | } | |
678 | if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */ | |
679 | printk(KERN_WARNING "%s: request with %d segments\n", | |
680 | lun->name, n_elem); | |
681 | goto drop; | |
682 | } | |
683 | urq->nsg = n_elem; | |
684 | sc->sg_stat[n_elem < 5 ? n_elem : 5]++; | |
685 | ||
1da177e4 | 686 | if (blk_pc_request(rq)) { |
2c26c9e6 | 687 | ub_cmd_build_packet(sc, lun, cmd, urq); |
1da177e4 | 688 | } else { |
2c26c9e6 | 689 | ub_cmd_build_block(sc, lun, cmd, urq); |
1da177e4 | 690 | } |
1da177e4 | 691 | cmd->state = UB_CMDST_INIT; |
f4800078 | 692 | cmd->lun = lun; |
1da177e4 | 693 | cmd->done = ub_rw_cmd_done; |
2c26c9e6 | 694 | cmd->back = urq; |
1da177e4 LT |
695 | |
696 | cmd->tag = sc->tagcnt++; | |
2c26c9e6 PZ |
697 | if (ub_submit_scsi(sc, cmd) != 0) |
698 | goto drop; | |
699 | ||
700 | return 0; | |
1da177e4 | 701 | |
2c26c9e6 PZ |
702 | drop: |
703 | ub_put_cmd(lun, cmd); | |
d1ad4ea3 | 704 | ub_end_rq(rq, DID_ERROR << 16); |
1da177e4 LT |
705 | return 0; |
706 | } | |
707 | ||
2c26c9e6 PZ |
708 | static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, |
709 | struct ub_scsi_cmd *cmd, struct ub_request *urq) | |
1da177e4 | 710 | { |
2c26c9e6 | 711 | struct request *rq = urq->rq; |
a1cf96ef | 712 | unsigned int block, nblks; |
1da177e4 LT |
713 | |
714 | if (rq_data_dir(rq) == WRITE) | |
2c26c9e6 | 715 | cmd->dir = UB_DIR_WRITE; |
1da177e4 | 716 | else |
2c26c9e6 | 717 | cmd->dir = UB_DIR_READ; |
1da177e4 | 718 | |
2c26c9e6 PZ |
719 | cmd->nsg = urq->nsg; |
720 | memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg); | |
1da177e4 LT |
721 | |
722 | /* | |
723 | * build the command | |
724 | * | |
725 | * The call to blk_queue_hardsect_size() guarantees that the request |
726 | * is aligned, but it is always given in terms of 512-byte units. |
727 | */ | |
a1cf96ef PZ |
728 | block = rq->sector >> lun->capacity.bshift; |
729 | nblks = rq->nr_sectors >> lun->capacity.bshift; | |
ba6abf13 | 730 | |
2c26c9e6 | 731 | cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10; |
1da177e4 LT |
732 | /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ |
733 | cmd->cdb[2] = block >> 24; | |
734 | cmd->cdb[3] = block >> 16; | |
735 | cmd->cdb[4] = block >> 8; | |
736 | cmd->cdb[5] = block; | |
737 | cmd->cdb[7] = nblks >> 8; | |
738 | cmd->cdb[8] = nblks; | |
739 | cmd->cdb_len = 10; | |
740 | ||
a1cf96ef | 741 | cmd->len = rq->nr_sectors * 512; |
1da177e4 LT |
742 | } |
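/*
 * A worked example of the math above with assumed values (illustration
 * only): on a LUN with bsize == 2048 (bshift == 2), a request at
 * 512-byte sector 800 for 16 sectors becomes device block 200 for
 * 4 blocks, so the big-endian CDB carries:
 *
 *	cdb[2..5] = 00 00 00 c8		// block 200 == 0xc8
 *	cdb[7..8] = 00 04		// 4 device blocks
 *	cmd->len  = 16 * 512		// 8192 bytes, i.e. 4 * 2048
 */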
743 | ||
2c26c9e6 PZ |
744 | static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, |
745 | struct ub_scsi_cmd *cmd, struct ub_request *urq) | |
1da177e4 | 746 | { |
2c26c9e6 | 747 | struct request *rq = urq->rq; |
1da177e4 LT |
748 | |
749 | if (rq->data_len == 0) { | |
750 | cmd->dir = UB_DIR_NONE; | |
751 | } else { | |
752 | if (rq_data_dir(rq) == WRITE) | |
753 | cmd->dir = UB_DIR_WRITE; | |
754 | else | |
755 | cmd->dir = UB_DIR_READ; | |
756 | } | |
a1cf96ef | 757 | |
2c26c9e6 PZ |
758 | cmd->nsg = urq->nsg; |
759 | memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg); | |
a1cf96ef PZ |
760 | |
761 | memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); | |
762 | cmd->cdb_len = rq->cmd_len; | |
763 | ||
1da177e4 | 764 | cmd->len = rq->data_len; |
1da177e4 LT |
765 | } |
766 | ||
767 | static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |
768 | { | |
f4800078 | 769 | struct ub_lun *lun = cmd->lun; |
2c26c9e6 PZ |
770 | struct ub_request *urq = cmd->back; |
771 | struct request *rq; | |
d1ad4ea3 | 772 | unsigned int scsi_status; |
1da177e4 | 773 | |
2c26c9e6 PZ |
774 | rq = urq->rq; |
775 | ||
a1cf96ef | 776 | if (cmd->error == 0) { |
a1cf96ef PZ |
777 | if (blk_pc_request(rq)) { |
778 | if (cmd->act_len >= rq->data_len) | |
779 | rq->data_len = 0; | |
780 | else | |
781 | rq->data_len -= cmd->act_len; | |
ba6abf13 | 782 | } |
d1ad4ea3 | 783 | scsi_status = 0; |
a1cf96ef | 784 | } else { |
a1cf96ef PZ |
785 | if (blk_pc_request(rq)) { |
786 | /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ | |
787 | memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); | |
788 | rq->sense_len = UB_SENSE_SIZE; | |
789 | if (sc->top_sense[0] != 0) | |
d1ad4ea3 | 790 | scsi_status = SAM_STAT_CHECK_CONDITION; |
a1cf96ef | 791 | else |
d1ad4ea3 | 792 | scsi_status = DID_ERROR << 16; |
2c26c9e6 PZ |
793 | } else { |
794 | if (cmd->error == -EIO) { | |
795 | if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0) | |
796 | return; | |
797 | } | |
d1ad4ea3 | 798 | scsi_status = SAM_STAT_CHECK_CONDITION; |
a1cf96ef PZ |
799 | } |
800 | } | |
ba6abf13 | 801 | |
2c26c9e6 PZ |
802 | urq->rq = NULL; |
803 | ||
f4800078 | 804 | ub_put_cmd(lun, cmd); |
d1ad4ea3 | 805 | ub_end_rq(rq, scsi_status); |
ba6abf13 | 806 | blk_start_queue(lun->disk->queue); |
1da177e4 LT |
807 | } |
808 | ||
d1ad4ea3 | 809 | static void ub_end_rq(struct request *rq, unsigned int scsi_status) |
1da177e4 | 810 | { |
d1ad4ea3 PZ |
811 | int uptodate; |
812 | ||
813 | if (scsi_status == 0) { | |
814 | uptodate = 1; | |
815 | } else { | |
816 | uptodate = 0; | |
817 | rq->errors = scsi_status; | |
818 | } | |
ab93091d | 819 | end_that_request_first(rq, uptodate, rq->hard_nr_sectors); |
8ffdc655 | 820 | end_that_request_last(rq, uptodate); |
1da177e4 LT |
821 | } |
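/*
 * The scsi_status values passed in follow the standard SCSI result
 * encoding, as inferred from the call sites in this file:
 *
 *	ub_end_rq(rq, 0);				// success
 *	ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);	// status byte: sense valid
 *	ub_end_rq(rq, DID_NO_CONNECT << 16);		// host byte: transport gone
 */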
822 | ||
2c26c9e6 PZ |
823 | static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, |
824 | struct ub_request *urq, struct ub_scsi_cmd *cmd) | |
825 | { | |
826 | ||
827 | if (atomic_read(&sc->poison)) | |
828 | return -ENXIO; | |
829 | ||
2c2e4a2e | 830 | ub_reset_enter(sc, urq->current_try); |
2c26c9e6 PZ |
831 | |
832 | if (urq->current_try >= 3) | |
833 | return -EIO; | |
834 | urq->current_try++; | |
b5600339 PZ |
835 | |
836 | /* Remove this if anyone complains of flooding. */ | |
837 | printk(KERN_DEBUG "%s: dir %c len/act %d/%d " | |
2c26c9e6 PZ |
838 | "[sense %x %02x %02x] retry %d\n", |
839 | sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len, | |
840 | cmd->key, cmd->asc, cmd->ascq, urq->current_try); | |
841 | ||
842 | memset(cmd, 0, sizeof(struct ub_scsi_cmd)); | |
843 | ub_cmd_build_block(sc, lun, cmd, urq); | |
844 | ||
845 | cmd->state = UB_CMDST_INIT; | |
846 | cmd->lun = lun; | |
847 | cmd->done = ub_rw_cmd_done; | |
848 | cmd->back = urq; | |
849 | ||
850 | cmd->tag = sc->tagcnt++; | |
851 | ||
852 | #if 0 /* Wasteful */ | |
853 | return ub_submit_scsi(sc, cmd); | |
854 | #else | |
855 | ub_cmdq_add(sc, cmd); | |
856 | return 0; | |
857 | #endif | |
858 | } | |
859 | ||
1da177e4 LT |
860 | /* |
861 | * Submit a regular SCSI operation (not an auto-sense). | |
862 | * | |
863 | * The Iron Law of Good Submit Routine is: | |
864 | * Zero return - callback is done, Nonzero return - callback is not done. | |
865 | * No exceptions. | |
866 | * | |
867 | * Host is assumed locked. | |
1da177e4 LT |
868 | */ |
869 | static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |
870 | { | |
871 | ||
872 | if (cmd->state != UB_CMDST_INIT || | |
873 | (cmd->dir != UB_DIR_NONE && cmd->len == 0)) { | |
874 | return -EINVAL; | |
875 | } | |
876 | ||
877 | ub_cmdq_add(sc, cmd); | |
878 | /* | |
879 | * We can call ub_scsi_dispatch(sc) right away here, but it's a little | |
880 | * safer to jump to a tasklet, in case upper layers do something silly. | |
881 | */ | |
882 | tasklet_schedule(&sc->tasklet); | |
883 | return 0; | |
884 | } | |
885 | ||
886 | /* | |
887 | * Submit the first URB for the queued command. | |
888 | * This function does not deal with queueing in any way. | |
889 | */ | |
890 | static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |
891 | { | |
892 | struct bulk_cb_wrap *bcb; | |
893 | int rc; | |
894 | ||
895 | bcb = &sc->work_bcb; | |
896 | ||
897 | /* | |
898 | * ``If the allocation length is eighteen or greater, and a device | |
899 | * server returns less than eighteen bytes of data, the application |
900 | * client should assume that the bytes not transferred would have been | |
901 | * zeroes had the device server returned those bytes.'' | |
902 | * | |
903 | * We zero sense for all commands so that when a packet request | |
904 | * fails it does not return a stale sense. | |
905 | */ | |
906 | memset(&sc->top_sense, 0, UB_SENSE_SIZE); | |
907 | ||
908 | /* set up the command wrapper */ | |
909 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); | |
910 | bcb->Tag = cmd->tag; /* Endianness is not important */ | |
911 | bcb->DataTransferLength = cpu_to_le32(cmd->len); | |
912 | bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0; | |
f4800078 | 913 | bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0; |
1da177e4 LT |
914 | bcb->Length = cmd->cdb_len; |
915 | ||
916 | /* copy the command payload */ | |
917 | memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE); | |
918 | ||
919 | UB_INIT_COMPLETION(sc->work_done); | |
920 | ||
921 | sc->last_pipe = sc->send_bulk_pipe; | |
922 | usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe, | |
923 | bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc); | |
1da177e4 LT |
924 | |
925 | /* Fill what we shouldn't be filling, because usb-storage did so. */ | |
926 | sc->work_urb.actual_length = 0; | |
927 | sc->work_urb.error_count = 0; | |
928 | sc->work_urb.status = 0; | |
929 | ||
930 | if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { | |
931 | /* XXX Clear stalls */ | |
1da177e4 LT |
932 | ub_complete(&sc->work_done); |
933 | return rc; | |
934 | } | |
935 | ||
936 | sc->work_timer.expires = jiffies + UB_URB_TIMEOUT; | |
937 | add_timer(&sc->work_timer); | |
938 | ||
939 | cmd->state = UB_CMDST_CMD; | |
1da177e4 LT |
940 | return 0; |
941 | } | |
942 | ||
943 | /* | |
944 | * Timeout handler. | |
945 | */ | |
946 | static void ub_urb_timeout(unsigned long arg) | |
947 | { | |
948 | struct ub_dev *sc = (struct ub_dev *) arg; | |
949 | unsigned long flags; | |
950 | ||
65b4fe55 | 951 | spin_lock_irqsave(sc->lock, flags); |
b31f821c PZ |
952 | if (!ub_is_completed(&sc->work_done)) |
953 | usb_unlink_urb(&sc->work_urb); | |
65b4fe55 | 954 | spin_unlock_irqrestore(sc->lock, flags); |
1da177e4 LT |
955 | } |
956 | ||
957 | /* | |
958 | * Completion routine for the work URB. | |
959 | * | |
960 | * This can be called directly from usb_submit_urb (while we have | |
961 | * the sc->lock taken) and from an interrupt (while we do NOT have | |
962 | * the sc->lock taken). Therefore, bounce this off to a tasklet. | |
963 | */ | |
7d12e780 | 964 | static void ub_urb_complete(struct urb *urb) |
1da177e4 LT |
965 | { |
966 | struct ub_dev *sc = urb->context; | |
967 | ||
968 | ub_complete(&sc->work_done); | |
969 | tasklet_schedule(&sc->tasklet); | |
970 | } | |
971 | ||
972 | static void ub_scsi_action(unsigned long _dev) | |
973 | { | |
974 | struct ub_dev *sc = (struct ub_dev *) _dev; | |
975 | unsigned long flags; | |
976 | ||
65b4fe55 | 977 | spin_lock_irqsave(sc->lock, flags); |
1da177e4 | 978 | ub_scsi_dispatch(sc); |
65b4fe55 | 979 | spin_unlock_irqrestore(sc->lock, flags); |
1da177e4 LT |
980 | } |
981 | ||
982 | static void ub_scsi_dispatch(struct ub_dev *sc) | |
983 | { | |
984 | struct ub_scsi_cmd *cmd; | |
985 | int rc; | |
986 | ||
2c26c9e6 | 987 | while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) { |
1da177e4 LT |
988 | if (cmd->state == UB_CMDST_DONE) { |
989 | ub_cmdq_pop(sc); | |
990 | (*cmd->done)(sc, cmd); | |
991 | } else if (cmd->state == UB_CMDST_INIT) { | |
1da177e4 LT |
992 | if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0) |
993 | break; | |
994 | cmd->error = rc; | |
995 | cmd->state = UB_CMDST_DONE; | |
1da177e4 LT |
996 | } else { |
997 | if (!ub_is_completed(&sc->work_done)) | |
998 | break; | |
b31f821c | 999 | del_timer(&sc->work_timer); |
1da177e4 LT |
1000 | ub_scsi_urb_compl(sc, cmd); |
1001 | } | |
1002 | } | |
1003 | } | |
1004 | ||
1005 | static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |
1006 | { | |
1007 | struct urb *urb = &sc->work_urb; | |
1008 | struct bulk_cs_wrap *bcs; | |
2c26c9e6 | 1009 | int len; |
1da177e4 LT |
1010 | int rc; |
1011 | ||
1012 | if (atomic_read(&sc->poison)) { | |
2c26c9e6 PZ |
1013 | ub_state_done(sc, cmd, -ENODEV); |
1014 | return; | |
1da177e4 LT |
1015 | } |
1016 | ||
1017 | if (cmd->state == UB_CMDST_CLEAR) { | |
1018 | if (urb->status == -EPIPE) { | |
1019 | /* | |
1020 | * STALL while clearing STALL. |
1021 | * The control pipe clears itself - nothing to do. | |
1da177e4 | 1022 | */ |
f4800078 PZ |
1023 | printk(KERN_NOTICE "%s: stall on control pipe\n", |
1024 | sc->name); | |
1da177e4 LT |
1025 | goto Bad_End; |
1026 | } | |
1027 | ||
1028 | /* | |
1029 | * We ignore the result for the halt clear. | |
1030 | */ | |
1031 | ||
1032 | /* reset the endpoint toggle */ | |
1033 | usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe), | |
1034 | usb_pipeout(sc->last_pipe), 0); | |
1035 | ||
1036 | ub_state_sense(sc, cmd); | |
1037 | ||
1038 | } else if (cmd->state == UB_CMDST_CLR2STS) { | |
1039 | if (urb->status == -EPIPE) { | |
f4800078 PZ |
1040 | printk(KERN_NOTICE "%s: stall on control pipe\n", |
1041 | sc->name); | |
1da177e4 LT |
1042 | goto Bad_End; |
1043 | } | |
1044 | ||
1045 | /* | |
1046 | * We ignore the result for the halt clear. | |
1047 | */ | |
1048 | ||
1049 | /* reset the endpoint toggle */ | |
1050 | usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe), | |
1051 | usb_pipeout(sc->last_pipe), 0); | |
1052 | ||
1053 | ub_state_stat(sc, cmd); | |
1054 | ||
1872bceb PZ |
1055 | } else if (cmd->state == UB_CMDST_CLRRS) { |
1056 | if (urb->status == -EPIPE) { | |
1872bceb PZ |
1057 | printk(KERN_NOTICE "%s: stall on control pipe\n", |
1058 | sc->name); | |
1059 | goto Bad_End; | |
1060 | } | |
1061 | ||
1062 | /* | |
1063 | * We ignore the result for the halt clear. | |
1064 | */ | |
1065 | ||
1066 | /* reset the endpoint toggle */ | |
1067 | usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe), | |
1068 | usb_pipeout(sc->last_pipe), 0); | |
1069 | ||
1070 | ub_state_stat_counted(sc, cmd); | |
1071 | ||
1da177e4 | 1072 | } else if (cmd->state == UB_CMDST_CMD) { |
2c26c9e6 PZ |
1073 | switch (urb->status) { |
1074 | case 0: | |
1075 | break; | |
1076 | case -EOVERFLOW: | |
1077 | goto Bad_End; | |
1078 | case -EPIPE: | |
1da177e4 LT |
1079 | rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); |
1080 | if (rc != 0) { | |
1081 | printk(KERN_NOTICE "%s: " | |
f4800078 PZ |
1082 | "unable to submit clear (%d)\n", |
1083 | sc->name, rc); | |
1da177e4 LT |
1084 | /* |
1085 | * This is typically ENOMEM or some other such shit. | |
1086 | * Retrying is pointless. Just do Bad End on it... | |
1087 | */ | |
2c26c9e6 PZ |
1088 | ub_state_done(sc, cmd, rc); |
1089 | return; | |
1da177e4 LT |
1090 | } |
1091 | cmd->state = UB_CMDST_CLEAR; | |
1da177e4 | 1092 | return; |
2c26c9e6 PZ |
1093 | case -ESHUTDOWN: /* unplug */ |
1094 | case -EILSEQ: /* unplug timeout on uhci */ | |
1095 | ub_state_done(sc, cmd, -ENODEV); | |
1096 | return; | |
1097 | default: | |
1da177e4 LT |
1098 | goto Bad_End; |
1099 | } | |
1100 | if (urb->actual_length != US_BULK_CB_WRAP_LEN) { | |
1da177e4 LT |
1101 | goto Bad_End; |
1102 | } | |
1103 | ||
a1cf96ef | 1104 | if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) { |
1da177e4 LT |
1105 | ub_state_stat(sc, cmd); |
1106 | return; | |
1107 | } | |
1108 | ||
a1cf96ef PZ |
1109 | // udelay(125); // usb-storage has this |
1110 | ub_data_start(sc, cmd); | |
1da177e4 LT |
1111 | |
1112 | } else if (cmd->state == UB_CMDST_DATA) { | |
1113 | if (urb->status == -EPIPE) { | |
1114 | rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); | |
1115 | if (rc != 0) { | |
1116 | printk(KERN_NOTICE "%s: " | |
f4800078 PZ |
1117 | "unable to submit clear (%d)\n", |
1118 | sc->name, rc); | |
2c26c9e6 PZ |
1119 | ub_state_done(sc, cmd, rc); |
1120 | return; | |
1da177e4 LT |
1121 | } |
1122 | cmd->state = UB_CMDST_CLR2STS; | |
1da177e4 LT |
1123 | return; |
1124 | } | |
1125 | if (urb->status == -EOVERFLOW) { | |
1126 | /* | |
1127 | * A babble? Failure, but we must transfer CSW now. | |
1128 | */ | |
1129 | cmd->error = -EOVERFLOW; /* A cheap trick... */ | |
a1cf96ef PZ |
1130 | ub_state_stat(sc, cmd); |
1131 | return; | |
1da177e4 | 1132 | } |
2c26c9e6 PZ |
1133 | |
1134 | if (cmd->dir == UB_DIR_WRITE) { | |
1135 | /* | |
1136 | * Do not continue writes in case of a failure. | |
1137 | * Doing so would cause sectors to be mixed up, | |
1138 | * which is worse than sectors lost. | |
1139 | * | |
1140 | * We must try to read the CSW, or many devices | |
1141 | * get confused. | |
1142 | */ | |
1143 | len = urb->actual_length; | |
1144 | if (urb->status != 0 || | |
1145 | len != cmd->sgv[cmd->current_sg].length) { | |
1146 | cmd->act_len += len; | |
2c26c9e6 PZ |
1147 | |
1148 | cmd->error = -EIO; | |
1149 | ub_state_stat(sc, cmd); | |
1150 | return; | |
1151 | } | |
1152 | ||
1153 | } else { | |
1154 | /* | |
1155 | * If an error occurs on read, we record it, and | |
1156 | * continue to fetch data in order to avoid a bubble. |
1157 | * | |
1158 | * As a small shortcut, we stop if we detect that | |
1159 | * a CSW is mixed into the data. |
1160 | */ | |
1161 | if (urb->status != 0) | |
1162 | cmd->error = -EIO; | |
1163 | ||
1164 | len = urb->actual_length; | |
1165 | if (urb->status != 0 || | |
1166 | len != cmd->sgv[cmd->current_sg].length) { | |
1167 | if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN) | |
1168 | goto Bad_End; | |
1169 | } | |
1170 | } | |
1da177e4 | 1171 | |
a1cf96ef | 1172 | cmd->act_len += urb->actual_length; |
1da177e4 | 1173 | |
a1cf96ef PZ |
1174 | if (++cmd->current_sg < cmd->nsg) { |
1175 | ub_data_start(sc, cmd); | |
1176 | return; | |
1177 | } | |
1da177e4 LT |
1178 | ub_state_stat(sc, cmd); |
1179 | ||
1180 | } else if (cmd->state == UB_CMDST_STAT) { | |
1181 | if (urb->status == -EPIPE) { | |
1182 | rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); | |
1183 | if (rc != 0) { | |
1184 | printk(KERN_NOTICE "%s: " | |
f4800078 PZ |
1185 | "unable to submit clear (%d)\n", |
1186 | sc->name, rc); | |
2c26c9e6 PZ |
1187 | ub_state_done(sc, cmd, rc); |
1188 | return; | |
1da177e4 | 1189 | } |
1872bceb PZ |
1190 | |
1191 | /* | |
1192 | * Having a stall when getting CSW is an error, so | |
1193 | * make sure upper levels are not oblivious to it. |
1194 | */ | |
1195 | cmd->error = -EIO; /* A cheap trick... */ | |
1196 | ||
1197 | cmd->state = UB_CMDST_CLRRS; | |
1da177e4 LT |
1198 | return; |
1199 | } | |
2c26c9e6 PZ |
1200 | |
1201 | /* Catch everything, including -EOVERFLOW and other nasties. */ | |
1da177e4 LT |
1202 | if (urb->status != 0) |
1203 | goto Bad_End; | |
1204 | ||
1205 | if (urb->actual_length == 0) { | |
1872bceb | 1206 | ub_state_stat_counted(sc, cmd); |
1da177e4 LT |
1207 | return; |
1208 | } | |
1209 | ||
1210 | /* | |
1211 | * Check the returned Bulk protocol status. | |
1872bceb | 1212 | * The status block has to be validated first. |
1da177e4 LT |
1213 | */ |
1214 | ||
1215 | bcs = &sc->work_bcs; | |
1872bceb PZ |
1216 | |
1217 | if (sc->signature == cpu_to_le32(0)) { | |
1da177e4 | 1218 | /* |
1872bceb PZ |
1219 | * This is the first reply, so do not perform the check. |
1220 | * Instead, remember the signature the device uses | |
1221 | * for future checks. But do not allow a null signature. |
1da177e4 | 1222 | */ |
1872bceb PZ |
1223 | sc->signature = bcs->Signature; |
1224 | if (sc->signature == cpu_to_le32(0)) { | |
1225 | ub_state_stat_counted(sc, cmd); | |
1226 | return; | |
1227 | } | |
1228 | } else { | |
1229 | if (bcs->Signature != sc->signature) { | |
1230 | ub_state_stat_counted(sc, cmd); | |
1231 | return; | |
1232 | } | |
1da177e4 | 1233 | } |
1da177e4 LT |
1234 | |
1235 | if (bcs->Tag != cmd->tag) { | |
1236 | /* | |
1237 | * This usually happens when we disagree with the | |
1238 | * device's microcode about something. For instance, | |
1239 | * a few of them throw this after timeouts. They buffer | |
1240 | * commands and reply to commands we timed out before. |
1241 | * Without flushing these replies we loop forever. | |
1242 | */ | |
1872bceb | 1243 | ub_state_stat_counted(sc, cmd); |
1da177e4 LT |
1244 | return; |
1245 | } | |
1246 | ||
2c26c9e6 PZ |
1247 | len = le32_to_cpu(bcs->Residue); |
1248 | if (len != cmd->len - cmd->act_len) { | |
1872bceb PZ |
1249 | /* |
1250 | * It is all right to transfer less, the caller has | |
1251 | * to check. But it's not all right if the device | |
1252 | * counts disagree with our counts. | |
1253 | */ | |
1872bceb PZ |
1254 | goto Bad_End; |
1255 | } | |
1256 | ||
1da177e4 LT |
1257 | switch (bcs->Status) { |
1258 | case US_BULK_STAT_OK: | |
1259 | break; | |
1260 | case US_BULK_STAT_FAIL: | |
1261 | ub_state_sense(sc, cmd); | |
1262 | return; | |
1263 | case US_BULK_STAT_PHASE: | |
1da177e4 LT |
1264 | goto Bad_End; |
1265 | default: | |
1266 | printk(KERN_INFO "%s: unknown CSW status 0x%x\n", | |
1267 | sc->name, bcs->Status); | |
2c26c9e6 PZ |
1268 | ub_state_done(sc, cmd, -EINVAL); |
1269 | return; | |
1da177e4 LT |
1270 | } |
1271 | ||
1272 | /* Not zeroing error to preserve a babble indicator */ | |
1872bceb PZ |
1273 | if (cmd->error != 0) { |
1274 | ub_state_sense(sc, cmd); | |
1275 | return; | |
1276 | } | |
1da177e4 | 1277 | cmd->state = UB_CMDST_DONE; |
1da177e4 LT |
1278 | ub_cmdq_pop(sc); |
1279 | (*cmd->done)(sc, cmd); | |
1280 | ||
1281 | } else if (cmd->state == UB_CMDST_SENSE) { | |
1282 | ub_state_done(sc, cmd, -EIO); | |
1283 | ||
1284 | } else { | |
1285 | printk(KERN_WARNING "%s: " | |
f4800078 PZ |
1286 | "wrong command state %d\n", |
1287 | sc->name, cmd->state); | |
2c26c9e6 PZ |
1288 | ub_state_done(sc, cmd, -EINVAL); |
1289 | return; | |
1da177e4 LT |
1290 | } |
1291 | return; | |
1292 | ||
1293 | Bad_End: /* Little Excel is dead */ | |
1294 | ub_state_done(sc, cmd, -EIO); | |
1295 | } | |
1296 | ||
a1cf96ef PZ |
1297 | /* |
1298 | * Factorization helper for the command state machine: | |
1299 | * Initiate a data segment transfer. | |
1300 | */ | |
1301 | static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |
1302 | { | |
1303 | struct scatterlist *sg = &cmd->sgv[cmd->current_sg]; | |
1304 | int pipe; | |
1305 | int rc; | |
1306 | ||
1307 | UB_INIT_COMPLETION(sc->work_done); | |
1308 | ||
1309 | if (cmd->dir == UB_DIR_READ) | |
1310 | pipe = sc->recv_bulk_pipe; | |
1311 | else | |
1312 | pipe = sc->send_bulk_pipe; | |
1313 | sc->last_pipe = pipe; | |
45711f1a JA |
1314 | usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg), |
1315 | sg->length, ub_urb_complete, sc); | |
a1cf96ef PZ |
1316 | sc->work_urb.actual_length = 0; |
1317 | sc->work_urb.error_count = 0; | |
1318 | sc->work_urb.status = 0; | |
1319 | ||
1320 | if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { | |
1321 | /* XXX Clear stalls */ | |
a1cf96ef PZ |
1322 | ub_complete(&sc->work_done); |
1323 | ub_state_done(sc, cmd, rc); | |
1324 | return; | |
1325 | } | |
1326 | ||
1327 | sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT; | |
1328 | add_timer(&sc->work_timer); | |
1329 | ||
1330 | cmd->state = UB_CMDST_DATA; | |
a1cf96ef PZ |
1331 | } |
1332 | ||
1da177e4 LT |
1333 | /* |
1334 | * Factorization helper for the command state machine: | |
1335 | * Finish the command. | |
1336 | */ | |
1337 | static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc) | |
1338 | { | |
1339 | ||
1340 | cmd->error = rc; | |
1341 | cmd->state = UB_CMDST_DONE; | |
1da177e4 LT |
1342 | ub_cmdq_pop(sc); |
1343 | (*cmd->done)(sc, cmd); | |
1344 | } | |
1345 | ||
1346 | /* | |
1347 | * Factorization helper for the command state machine: | |
1348 | * Submit a CSW read. | |
1349 | */ | |
1872bceb | 1350 | static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) |
1da177e4 LT |
1351 | { |
1352 | int rc; | |
1353 | ||
1354 | UB_INIT_COMPLETION(sc->work_done); | |
1355 | ||
1356 | sc->last_pipe = sc->recv_bulk_pipe; | |
1357 | usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe, | |
1358 | &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc); | |
1da177e4 LT |
1359 | sc->work_urb.actual_length = 0; |
1360 | sc->work_urb.error_count = 0; | |
1361 | sc->work_urb.status = 0; | |
1362 | ||
1363 | if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { | |
1364 | /* XXX Clear stalls */ | |
1da177e4 LT |
1365 | ub_complete(&sc->work_done); |
1366 | ub_state_done(sc, cmd, rc); | |
1872bceb | 1367 | return -1; |
1da177e4 LT |
1368 | } |
1369 | ||
1370 | sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT; | |
1371 | add_timer(&sc->work_timer); | |
1872bceb | 1372 | return 0; |
1da177e4 LT |
1373 | } |
1374 | ||
1375 | /* | |
1376 | * Factorization helper for the command state machine: | |
1377 | * Submit a CSW read and go to STAT state. | |
1378 | */ | |
1379 | static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |
1380 | { | |
1872bceb PZ |
1381 | |
1382 | if (__ub_state_stat(sc, cmd) != 0) | |
1383 | return; | |
1da177e4 LT |
1384 | |
1385 | cmd->stat_count = 0; | |
1386 | cmd->state = UB_CMDST_STAT; | |
1872bceb PZ |
1387 | } |
1388 | ||
1389 | /* | |
1390 | * Factorization helper for the command state machine: | |
1391 | * Submit a CSW read and go to STAT state with counter (along [C] path). | |
1392 | */ | |
1393 | static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |
1394 | { | |
1395 | ||
1396 | if (++cmd->stat_count >= 4) { | |
1397 | ub_state_sense(sc, cmd); | |
1398 | return; | |
1399 | } | |
1400 | ||
1401 | if (__ub_state_stat(sc, cmd) != 0) | |
1402 | return; | |
1403 | ||
1404 | cmd->state = UB_CMDST_STAT; | |
1da177e4 LT |
1405 | } |
1406 | ||
1407 | /* | |
1408 | * Factorization helper for the command state machine: | |
1409 | * Submit a REQUEST SENSE and go to SENSE state. | |
1410 | */ | |
1411 | static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |
1412 | { | |
1413 | struct ub_scsi_cmd *scmd; | |
a1cf96ef | 1414 | struct scatterlist *sg; |
1da177e4 LT |
1415 | int rc; |
1416 | ||
1417 | if (cmd->cdb[0] == REQUEST_SENSE) { | |
1418 | rc = -EPIPE; | |
1419 | goto error; | |
1420 | } | |
1421 | ||
1422 | scmd = &sc->top_rqs_cmd; | |
a1cf96ef | 1423 | memset(scmd, 0, sizeof(struct ub_scsi_cmd)); |
1da177e4 LT |
1424 | scmd->cdb[0] = REQUEST_SENSE; |
1425 | scmd->cdb[4] = UB_SENSE_SIZE; | |
1426 | scmd->cdb_len = 6; | |
1427 | scmd->dir = UB_DIR_READ; | |
1428 | scmd->state = UB_CMDST_INIT; | |
a1cf96ef PZ |
1429 | scmd->nsg = 1; |
1430 | sg = &scmd->sgv[0]; | |
45711f1a | 1431 | sg_set_page(sg, virt_to_page(sc->top_sense)); |
38ffdd62 | 1432 | sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1); |
a1cf96ef | 1433 | sg->length = UB_SENSE_SIZE; |
1da177e4 | 1434 | scmd->len = UB_SENSE_SIZE; |
f4800078 | 1435 | scmd->lun = cmd->lun; |
1da177e4 LT |
1436 | scmd->done = ub_top_sense_done; |
1437 | scmd->back = cmd; | |
1438 | ||
1439 | scmd->tag = sc->tagcnt++; | |
1440 | ||
1441 | cmd->state = UB_CMDST_SENSE; | |
1da177e4 LT |
1442 | |
1443 | ub_cmdq_insert(sc, scmd); | |
1444 | return; | |
1445 | ||
1446 | error: | |
1447 | ub_state_done(sc, cmd, rc); | |
1448 | } | |
1449 | ||
1450 | /* | |
1451 | * A helper for the command's state machine: | |
1452 | * Submit a stall clear. | |
1453 | */ | |
1454 | static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, | |
1455 | int stalled_pipe) | |
1456 | { | |
1457 | int endp; | |
1458 | struct usb_ctrlrequest *cr; | |
1459 | int rc; | |
1460 | ||
1461 | endp = usb_pipeendpoint(stalled_pipe); | |
1462 | if (usb_pipein (stalled_pipe)) | |
1463 | endp |= USB_DIR_IN; | |
1464 | ||
1465 | cr = &sc->work_cr; | |
1466 | cr->bRequestType = USB_RECIP_ENDPOINT; | |
1467 | cr->bRequest = USB_REQ_CLEAR_FEATURE; | |
1468 | cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT); | |
1469 | cr->wIndex = cpu_to_le16(endp); | |
1470 | cr->wLength = cpu_to_le16(0); | |
1471 | ||
1472 | UB_INIT_COMPLETION(sc->work_done); | |
1473 | ||
1474 | usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, | |
1475 | (unsigned char*) cr, NULL, 0, ub_urb_complete, sc); | |
1da177e4 LT |
1476 | sc->work_urb.actual_length = 0; |
1477 | sc->work_urb.error_count = 0; | |
1478 | sc->work_urb.status = 0; | |
1479 | ||
1480 | if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { | |
1481 | ub_complete(&sc->work_done); | |
1482 | return rc; | |
1483 | } | |
1484 | ||
1485 | sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT; | |
1486 | add_timer(&sc->work_timer); | |
1487 | return 0; | |
1488 | } | |
1489 | ||
1490 | /* | |
1491 | */ | |
1492 | static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd) | |
1493 | { | |
a1cf96ef | 1494 | unsigned char *sense = sc->top_sense; |
1da177e4 LT |
1495 | struct ub_scsi_cmd *cmd; |
1496 | ||
1da177e4 LT |
1497 | /* |
1498 | * Find the command which triggered the unit attention or a check, | |
1499 | * save the sense into it, and advance its state machine. | |
1500 | */ | |
1501 | if ((cmd = ub_cmdq_peek(sc)) == NULL) { | |
1502 | printk(KERN_WARNING "%s: sense done while idle\n", sc->name); | |
1503 | return; | |
1504 | } | |
1505 | if (cmd != scmd->back) { | |
1506 | printk(KERN_WARNING "%s: " | |
f4800078 PZ |
1507 | "sense done for wrong command 0x%x\n", |
1508 | sc->name, cmd->tag); | |
1da177e4 LT |
1509 | return; |
1510 | } | |
1511 | if (cmd->state != UB_CMDST_SENSE) { | |
1512 | printk(KERN_WARNING "%s: " | |
f4800078 PZ |
1513 | "sense done with bad cmd state %d\n", |
1514 | sc->name, cmd->state); | |
1da177e4 LT |
1515 | return; |
1516 | } | |
1517 | ||
952ba222 PZ |
1518 | /* |
1519 | * Ignoring scmd->act_len, because the buffer was pre-zeroed. | |
1520 | */ | |
1da177e4 LT |
1521 | cmd->key = sense[2] & 0x0F; |
1522 | cmd->asc = sense[12]; | |
1523 | cmd->ascq = sense[13]; | |
1524 | ||
1525 | ub_scsi_urb_compl(sc, cmd); | |
1526 | } | |
1527 | ||
2c26c9e6 PZ |
1528 | /* |
1529 | * Reset management | |
2c2e4a2e PZ |
1530 | * XXX Move usb_reset_device to khubd. Hogging kevent is not a good thing. |
1531 | * XXX Make usb_sync_reset asynchronous. | |
2c26c9e6 PZ |
1532 | */ |
1533 | ||
2c2e4a2e | 1534 | static void ub_reset_enter(struct ub_dev *sc, int try) |
2c26c9e6 PZ |
1535 | { |
1536 | ||
1537 | if (sc->reset) { | |
1538 | /* This happens often on multi-LUN devices. */ | |
1539 | return; | |
1540 | } | |
2c2e4a2e | 1541 | sc->reset = try + 1; |
2c26c9e6 PZ |
1542 | |
1543 | #if 0 /* Not needed because the disconnect waits for us. */ | |
1544 | unsigned long flags; | |
1545 | spin_lock_irqsave(&ub_lock, flags); | |
1546 | sc->openc++; | |
1547 | spin_unlock_irqrestore(&ub_lock, flags); | |
1548 | #endif | |
1549 | ||
1550 | #if 0 /* We let them stop themselves. */ | |
2c26c9e6 | 1551 | struct ub_lun *lun; |
a69228de | 1552 | list_for_each_entry(lun, &sc->luns, link) { |
2c26c9e6 PZ |
1553 | blk_stop_queue(lun->disk->queue); |
1554 | } | |
1555 | #endif | |
1556 | ||
1557 | schedule_work(&sc->reset_work); | |
1558 | } | |
1559 | ||
c4028958 | 1560 | static void ub_reset_task(struct work_struct *work) |
2c26c9e6 | 1561 | { |
c4028958 | 1562 | struct ub_dev *sc = container_of(work, struct ub_dev, reset_work); |
2c26c9e6 | 1563 | unsigned long flags; |
2c26c9e6 PZ |
1564 | struct ub_lun *lun; |
1565 | int lkr, rc; | |
1566 | ||
1567 | if (!sc->reset) { | |
1568 | printk(KERN_WARNING "%s: Running reset unrequested\n", | |
1569 | sc->name); | |
1570 | return; | |
1571 | } | |
1572 | ||
1573 | if (atomic_read(&sc->poison)) { | |
b5600339 | 1574 | ; |
2c2e4a2e PZ |
1575 | } else if ((sc->reset & 1) == 0) { |
1576 | ub_sync_reset(sc); | |
1577 | msleep(700); /* usb-storage sleeps 6s (!) */ | |
1578 | ub_probe_clear_stall(sc, sc->recv_bulk_pipe); | |
1579 | ub_probe_clear_stall(sc, sc->send_bulk_pipe); | |
2c26c9e6 | 1580 | } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) { |
b5600339 | 1581 | ; |
2c26c9e6 PZ |
1582 | } else { |
1583 | if ((lkr = usb_lock_device_for_reset(sc->dev, sc->intf)) < 0) { | |
1584 | printk(KERN_NOTICE | |
1585 | "%s: usb_lock_device_for_reset failed (%d)\n", | |
1586 | sc->name, lkr); | |
1587 | } else { | |
1588 | rc = usb_reset_device(sc->dev); | |
1589 | if (rc < 0) { | |
1590 | printk(KERN_NOTICE "%s: " | |
1591 | "usb_lock_device_for_reset failed (%d)\n", | |
1592 | sc->name, rc); | |
1593 | } | |
1594 | ||
1595 | if (lkr) | |
1596 | usb_unlock_device(sc->dev); | |
1597 | } | |
1598 | } | |
1599 | ||
1600 | /* | |
1601 | * In theory, no commands can be running while reset is active, | |
1602 | * so nobody can ask for another reset, and so we do not need any | |
1603 | * queues of resets or anything. We do need a spinlock though, | |
1604 | * to interact with block layer. | |
1605 | */ | |
65b4fe55 | 1606 | spin_lock_irqsave(sc->lock, flags); |
2c26c9e6 PZ |
1607 | sc->reset = 0; |
1608 | tasklet_schedule(&sc->tasklet); | |
a69228de | 1609 | list_for_each_entry(lun, &sc->luns, link) { |
2c26c9e6 PZ |
1610 | blk_start_queue(lun->disk->queue); |
1611 | } | |
1612 | wake_up(&sc->reset_wait); | |
65b4fe55 | 1613 | spin_unlock_irqrestore(sc->lock, flags); |
2c26c9e6 PZ |
1614 | } |
1615 | ||
1da177e4 LT |
1616 | /* |
1617 | * This is called from a process context. | |
1618 | */ | |
f4800078 | 1619 | static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun) |
1da177e4 LT |
1620 | { |
1621 | ||
f4800078 | 1622 | lun->readonly = 0; /* XXX Query this from the device */ |
1da177e4 | 1623 | |
f4800078 PZ |
1624 | lun->capacity.nsec = 0; |
1625 | lun->capacity.bsize = 512; | |
1626 | lun->capacity.bshift = 0; | |
1da177e4 | 1627 | |
f4800078 | 1628 | if (ub_sync_tur(sc, lun) != 0) |
1da177e4 | 1629 | return; /* Not ready */ |
f4800078 | 1630 | lun->changed = 0; |
1da177e4 | 1631 | |
f4800078 | 1632 | if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) { |
1da177e4 LT |
1633 | /* |
1634 | * The retry here means something is wrong, either with the | |
1635 | * device, with the transport, or with our code. | |
1636 | * We keep this because sd.c has retries for capacity. | |
1637 | */ | |
f4800078 PZ |
1638 | if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) { |
1639 | lun->capacity.nsec = 0; | |
1640 | lun->capacity.bsize = 512; | |
1641 | lun->capacity.bshift = 0; | |
1da177e4 LT |
1642 | } |
1643 | } | |
1644 | } | |
1645 | ||
1646 | /* | |
1647 | * The open function. | |
1648 | * This is mostly needed to keep refcounting, but also to support | |
1649 | * media checks on removable media drives. | |
1650 | */ | |
1651 | static int ub_bd_open(struct inode *inode, struct file *filp) | |
1652 | { | |
1653 | struct gendisk *disk = inode->i_bdev->bd_disk; | |
41fea55e PZ |
1654 | struct ub_lun *lun = disk->private_data; |
1655 | struct ub_dev *sc = lun->udev; | |
1da177e4 LT |
1656 | unsigned long flags; |
1657 | int rc; | |
1658 | ||
1da177e4 LT |
1659 | spin_lock_irqsave(&ub_lock, flags); |
1660 | if (atomic_read(&sc->poison)) { | |
1661 | spin_unlock_irqrestore(&ub_lock, flags); | |
1662 | return -ENXIO; | |
1663 | } | |
1664 | sc->openc++; | |
1665 | spin_unlock_irqrestore(&ub_lock, flags); | |
1666 | ||
f4800078 | 1667 | if (lun->removable || lun->readonly) |
1da177e4 LT |
1668 | check_disk_change(inode->i_bdev); |
1669 | ||
1670 | /* | |
1671 | * The sd.c considers ->media_present and ->changed not equivalent, | |
1672 | * under some pretty murky conditions (a failure of READ CAPACITY). | |
1673 | * We may need it one day. | |
1674 | */ | |
f4800078 | 1675 | if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) { |
1da177e4 LT |
1676 | rc = -ENOMEDIUM; |
1677 | goto err_open; | |
1678 | } | |
1679 | ||
f4800078 | 1680 | if (lun->readonly && (filp->f_mode & FMODE_WRITE)) { |
1da177e4 LT |
1681 | rc = -EROFS; |
1682 | goto err_open; | |
1683 | } | |
1684 | ||
1685 | return 0; | |
1686 | ||
1687 | err_open: | |
1688 | ub_put(sc); | |
1689 | return rc; | |
1690 | } | |
1691 | ||
1692 | /* | |
1693 | */ | |
1694 | static int ub_bd_release(struct inode *inode, struct file *filp) | |
1695 | { | |
1696 | struct gendisk *disk = inode->i_bdev->bd_disk; | |
f4800078 PZ |
1697 | struct ub_lun *lun = disk->private_data; |
1698 | struct ub_dev *sc = lun->udev; | |
1da177e4 LT |
1699 | |
1700 | ub_put(sc); | |
1701 | return 0; | |
1702 | } | |
1703 | ||
1704 | /* | |
1705 | * The ioctl interface. | |
1706 | */ | |
1707 | static int ub_bd_ioctl(struct inode *inode, struct file *filp, | |
1708 | unsigned int cmd, unsigned long arg) | |
1709 | { | |
1710 | struct gendisk *disk = inode->i_bdev->bd_disk; | |
1711 | void __user *usermem = (void __user *) arg; | |
1712 | ||
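| /* scsi_cmd_ioctl() implements SG_IO and the other legacy SCSI ioctls | |
| * on our behalf. */ | |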
45e79a3a | 1713 | return scsi_cmd_ioctl(filp, disk->queue, disk, cmd, usermem); |
1da177e4 LT |
1714 | } |
1715 | ||
1716 | /* | |
1717 | * This is called once a new disk has been seen by the block layer or by ub_probe(). | |
1718 | * The main objective here is to discover the features of the media such as | |
1719 | * the capacity, read-only status, etc. USB storage generally does not | |
1720 | * need to be spun up, but if we needed it, this would be the place. | |
1721 | * | |
1722 | * This call can sleep. | |
1723 | * | |
1724 | * The return code is not used. | |
1725 | */ | |
1726 | static int ub_bd_revalidate(struct gendisk *disk) | |
1727 | { | |
f4800078 PZ |
1728 | struct ub_lun *lun = disk->private_data; |
1729 | ||
1730 | ub_revalidate(lun->udev, lun); | |
1da177e4 LT |
1731 | |
1732 | /* XXX Support sector size switching like in sr.c */ | |
f4800078 PZ |
1733 | blk_queue_hardsect_size(disk->queue, lun->capacity.bsize); |
1734 | set_capacity(disk, lun->capacity.nsec); | |
1735 | // set_disk_ro(sdkp->disk, lun->readonly); | |
1da177e4 LT |
1736 | |
1737 | return 0; | |
1738 | } | |
1739 | ||
1740 | /* | |
1741 | * This check is called by the block layer to verify whether the media | |
1742 | * is still available. It is supposed to be harmless, lightweight and | |
1743 | * non-intrusive when the media was not changed. | |
1744 | * | |
1745 | * This call can sleep. | |
1746 | * | |
1747 | * The return code is bool! | |
1748 | */ | |
1749 | static int ub_bd_media_changed(struct gendisk *disk) | |
1750 | { | |
f4800078 | 1751 | struct ub_lun *lun = disk->private_data; |
1da177e4 | 1752 | |
f4800078 | 1753 | if (!lun->removable) |
1da177e4 LT |
1754 | return 0; |
1755 | ||
1756 | /* | |
1757 | * We always clear checks after every command, so this is not | |
1758 | * as dangerous as it looks. If the TEST_UNIT_READY fails here, | |
1759 | * the device really is not ready, and operator or software | |
1760 | * intervention is required. One dangerous case is a drive which | |
1761 | * spins itself down: come the time to write dirty pages, the write | |
1762 | * fails, and the block layer discards the data. Since we never | |
1763 | * spin drives up, such devices simply cannot be used with ub anyway. | |
1764 | */ | |
f4800078 PZ |
1765 | if (ub_sync_tur(lun->udev, lun) != 0) { |
1766 | lun->changed = 1; | |
1da177e4 LT |
1767 | return 1; |
1768 | } | |
1769 | ||
f4800078 | 1770 | return lun->changed; |
1da177e4 LT |
1771 | } |
1772 | ||
1773 | static struct block_device_operations ub_bd_fops = { | |
1774 | .owner = THIS_MODULE, | |
1775 | .open = ub_bd_open, | |
1776 | .release = ub_bd_release, | |
1777 | .ioctl = ub_bd_ioctl, | |
1778 | .media_changed = ub_bd_media_changed, | |
1779 | .revalidate_disk = ub_bd_revalidate, | |
1780 | }; | |
1781 | ||
1782 | /* | |
1783 | * Common ->done routine for commands executed synchronously. | |
1784 | */ | |
1785 | static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |
1786 | { | |
1787 | struct completion *cop = cmd->back; | |
1788 | complete(cop); | |
1789 | } | |
1790 | ||
1791 | /* | |
1792 | * Test if the device has a check condition on it, synchronously. | |
1793 | */ | |
f4800078 | 1794 | static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun) |
1da177e4 LT |
1795 | { |
1796 | struct ub_scsi_cmd *cmd; | |
1797 | enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) }; | |
1798 | unsigned long flags; | |
1799 | struct completion compl; | |
1800 | int rc; | |
1801 | ||
1802 | init_completion(&compl); | |
1803 | ||
1804 | rc = -ENOMEM; | |
29da7937 | 1805 | if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) |
1da177e4 | 1806 | goto err_alloc; |
1da177e4 LT |
1807 | |
1808 | cmd->cdb[0] = TEST_UNIT_READY; | |
1809 | cmd->cdb_len = 6; | |
1810 | cmd->dir = UB_DIR_NONE; | |
1811 | cmd->state = UB_CMDST_INIT; | |
f4800078 | 1812 | cmd->lun = lun; /* This may be NULL, but that's ok */ |
1da177e4 LT |
1813 | cmd->done = ub_probe_done; |
1814 | cmd->back = &compl; | |
1815 | ||
65b4fe55 | 1816 | spin_lock_irqsave(sc->lock, flags); |
1da177e4 LT |
1817 | cmd->tag = sc->tagcnt++; |
1818 | ||
1819 | rc = ub_submit_scsi(sc, cmd); | |
65b4fe55 | 1820 | spin_unlock_irqrestore(sc->lock, flags); |
1da177e4 | 1821 | |
b5600339 | 1822 | if (rc != 0) |
1da177e4 | 1823 | goto err_submit; |
1da177e4 LT |
1824 | |
1825 | wait_for_completion(&compl); | |
1826 | ||
1827 | rc = cmd->error; | |
1828 | ||
1829 | if (rc == -EIO && cmd->key != 0) /* Retries for benh's key */ | |
1830 | rc = cmd->key; | |
1831 | ||
1832 | err_submit: | |
1833 | kfree(cmd); | |
1834 | err_alloc: | |
1835 | return rc; | |
1836 | } | |
1837 | ||
1838 | /* | |
1839 | * Read the SCSI capacity synchronously (for probing). | |
1840 | */ | |
f4800078 PZ |
1841 | static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, |
1842 | struct ub_capacity *ret) | |
1da177e4 LT |
1843 | { |
1844 | struct ub_scsi_cmd *cmd; | |
a1cf96ef | 1845 | struct scatterlist *sg; |
1da177e4 LT |
1846 | char *p; |
1847 | enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 }; | |
1848 | unsigned long flags; | |
1849 | unsigned int bsize, shift; | |
1850 | unsigned long nsec; | |
1851 | struct completion compl; | |
1852 | int rc; | |
1853 | ||
1854 | init_completion(&compl); | |
1855 | ||
1856 | rc = -ENOMEM; | |
29da7937 | 1857 | if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) |
1da177e4 | 1858 | goto err_alloc; |
1da177e4 LT |
1859 | p = (char *)cmd + sizeof(struct ub_scsi_cmd); |
1860 | ||
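| /* 0x25 is READ CAPACITY(10); <scsi/scsi.h> spells it READ_CAPACITY. */ | |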
1861 | cmd->cdb[0] = 0x25; | |
1862 | cmd->cdb_len = 10; | |
1863 | cmd->dir = UB_DIR_READ; | |
1864 | cmd->state = UB_CMDST_INIT; | |
a1cf96ef PZ |
1865 | cmd->nsg = 1; |
1866 | sg = &cmd->sgv[0]; | |
45711f1a | 1867 | sg_set_page(sg, virt_to_page(p)); |
38ffdd62 | 1868 | sg->offset = (unsigned long)p & (PAGE_SIZE-1); |
a1cf96ef | 1869 | sg->length = 8; |
1da177e4 | 1870 | cmd->len = 8; |
f4800078 | 1871 | cmd->lun = lun; |
1da177e4 LT |
1872 | cmd->done = ub_probe_done; |
1873 | cmd->back = &compl; | |
1874 | ||
65b4fe55 | 1875 | spin_lock_irqsave(sc->lock, flags); |
1da177e4 LT |
1876 | cmd->tag = sc->tagcnt++; |
1877 | ||
1878 | rc = ub_submit_scsi(sc, cmd); | |
65b4fe55 | 1879 | spin_unlock_irqrestore(sc->lock, flags); |
1da177e4 | 1880 | |
b5600339 | 1881 | if (rc != 0) |
1da177e4 | 1882 | goto err_submit; |
1da177e4 LT |
1883 | |
1884 | wait_for_completion(&compl); | |
1885 | ||
1886 | if (cmd->error != 0) { | |
1da177e4 LT |
1887 | rc = -EIO; |
1888 | goto err_read; | |
1889 | } | |
1890 | if (cmd->act_len != 8) { | |
1da177e4 LT |
1891 | rc = -EIO; |
1892 | goto err_read; | |
1893 | } | |
1894 | ||
1895 | /* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */ | |
1896 | nsec = be32_to_cpu(*(__be32 *)p) + 1; | |
1897 | bsize = be32_to_cpu(*(__be32 *)(p + 4)); | |
1898 | switch (bsize) { | |
1899 | case 512: shift = 0; break; | |
1900 | case 1024: shift = 1; break; | |
1901 | case 2048: shift = 2; break; | |
1902 | case 4096: shift = 3; break; | |
1903 | default: | |
1da177e4 LT |
1904 | rc = -EDOM; |
1905 | goto err_inv_bsize; | |
1906 | } | |
1907 | ||
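| /* | |
| * For example, a device reporting last LBA 0x0003ffff with 2048-byte | |
| * blocks gives nsec = 0x40000 device blocks and shift = 2, so the | |
| * stored nsec << shift is 0x100000 sectors of 512 bytes (512 MB). | |
| */ | |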
1908 | ret->bsize = bsize; | |
1909 | ret->bshift = shift; | |
1910 | ret->nsec = nsec << shift; | |
1911 | rc = 0; | |
1912 | ||
1913 | err_inv_bsize: | |
1914 | err_read: | |
1915 | err_submit: | |
1916 | kfree(cmd); | |
1917 | err_alloc: | |
1918 | return rc; | |
1919 | } | |
1920 | ||
1921 | /* | |
1922 | */ | |
7d12e780 | 1923 | static void ub_probe_urb_complete(struct urb *urb) |
1da177e4 LT |
1924 | { |
1925 | struct completion *cop = urb->context; | |
1926 | complete(cop); | |
1927 | } | |
1928 | ||
1929 | static void ub_probe_timeout(unsigned long arg) | |
1930 | { | |
1931 | struct completion *cop = (struct completion *) arg; | |
1932 | complete(cop); | |
1933 | } | |
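| /* | |
| * The pair above implements a synchronous URB with a timeout: the URB | |
| * callback and the timer race to complete the same completion, and the | |
| * caller reconciles them with usb_kill_urb() and del_timer_sync(). | |
| */ | |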
1934 | ||
2c2e4a2e PZ |
1935 | /* |
1936 | * Reset with a Bulk reset. | |
1937 | */ | |
1938 | static int ub_sync_reset(struct ub_dev *sc) | |
1939 | { | |
1940 | int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber; | |
1941 | struct usb_ctrlrequest *cr; | |
1942 | struct completion compl; | |
1943 | struct timer_list timer; | |
1944 | int rc; | |
1945 | ||
1946 | init_completion(&compl); | |
1947 | ||
1948 | cr = &sc->work_cr; | |
1949 | cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE; | |
1950 | cr->bRequest = US_BULK_RESET_REQUEST; | |
1951 | cr->wValue = cpu_to_le16(0); | |
1952 | cr->wIndex = cpu_to_le16(ifnum); | |
1953 | cr->wLength = cpu_to_le16(0); | |
1954 | ||
1955 | usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, | |
1956 | (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl); | |
1957 | sc->work_urb.actual_length = 0; | |
1958 | sc->work_urb.error_count = 0; | |
1959 | sc->work_urb.status = 0; | |
1960 | ||
1961 | if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) { | |
1962 | printk(KERN_WARNING | |
1963 | "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc); | |
1964 | return rc; | |
1965 | } | |
1966 | ||
1967 | init_timer(&timer); | |
1968 | timer.function = ub_probe_timeout; | |
1969 | timer.data = (unsigned long) &compl; | |
1970 | timer.expires = jiffies + UB_CTRL_TIMEOUT; | |
1971 | add_timer(&timer); | |
1972 | ||
1973 | wait_for_completion(&compl); | |
1974 | ||
1975 | del_timer_sync(&timer); | |
1976 | usb_kill_urb(&sc->work_urb); | |
1977 | ||
1978 | return sc->work_urb.status; | |
1979 | } | |
1980 | ||
f4800078 PZ |
1981 | /* |
1982 | * Get the number of LUNs by way of the Bulk GetMaxLUN command. | |
1983 | */ | |
1984 | static int ub_sync_getmaxlun(struct ub_dev *sc) | |
1985 | { | |
1986 | int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber; | |
1987 | unsigned char *p; | |
1988 | enum { ALLOC_SIZE = 1 }; | |
1989 | struct usb_ctrlrequest *cr; | |
1990 | struct completion compl; | |
1991 | struct timer_list timer; | |
1992 | int nluns; | |
1993 | int rc; | |
1994 | ||
1995 | init_completion(&compl); | |
1996 | ||
1997 | rc = -ENOMEM; | |
1998 | if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) | |
1999 | goto err_alloc; | |
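| /* Preload a sentinel: if the device completes the transfer without | |
| * writing a byte, the 55 survives and we fall back to the single-LUN | |
| * default in ub_probe. */ | |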
2000 | *p = 55; | |
2001 | ||
2002 | cr = &sc->work_cr; | |
2003 | cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; | |
2004 | cr->bRequest = US_BULK_GET_MAX_LUN; | |
2005 | cr->wValue = cpu_to_le16(0); | |
2006 | cr->wIndex = cpu_to_le16(ifnum); | |
2007 | cr->wLength = cpu_to_le16(1); | |
2008 | ||
2009 | usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe, | |
2010 | (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl); | |
f4800078 PZ |
2011 | sc->work_urb.actual_length = 0; |
2012 | sc->work_urb.error_count = 0; | |
2013 | sc->work_urb.status = 0; | |
2014 | ||
b5600339 | 2015 | if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) |
f4800078 | 2016 | goto err_submit; |
f4800078 PZ |
2017 | |
2018 | init_timer(&timer); | |
2019 | timer.function = ub_probe_timeout; | |
2020 | timer.data = (unsigned long) &compl; | |
2021 | timer.expires = jiffies + UB_CTRL_TIMEOUT; | |
2022 | add_timer(&timer); | |
2023 | ||
2024 | wait_for_completion(&compl); | |
2025 | ||
2026 | del_timer_sync(&timer); | |
2027 | usb_kill_urb(&sc->work_urb); | |
2028 | ||
b5600339 | 2029 | if ((rc = sc->work_urb.status) < 0) |
64bd8453 | 2030 | goto err_io; |
64bd8453 | 2031 | |
f4800078 | 2032 | if (sc->work_urb.actual_length != 1) { |
f4800078 PZ |
2033 | nluns = 0; |
2034 | } else { | |
2035 | if ((nluns = *p) == 55) { | |
2036 | nluns = 0; | |
2037 | } else { | |
2038 | /* GetMaxLUN returns the maximum LUN number */ | |
2039 | nluns += 1; | |
2040 | if (nluns > UB_MAX_LUNS) | |
2041 | nluns = UB_MAX_LUNS; | |
2042 | } | |
f4800078 PZ |
2043 | } |
2044 | ||
2045 | kfree(p); | |
2046 | return nluns; | |
2047 | ||
64bd8453 | 2048 | err_io: |
f4800078 PZ |
2049 | err_submit: |
2050 | kfree(p); | |
2051 | err_alloc: | |
2052 | return rc; | |
2053 | } | |
2054 | ||
1da177e4 LT |
2055 | /* |
2056 | * Clear initial stalls. | |
2057 | */ | |
2058 | static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe) | |
2059 | { | |
2060 | int endp; | |
2061 | struct usb_ctrlrequest *cr; | |
2062 | struct completion compl; | |
2063 | struct timer_list timer; | |
2064 | int rc; | |
2065 | ||
2066 | init_completion(&compl); | |
2067 | ||
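| /* For ClearFeature(ENDPOINT_HALT), wIndex holds the endpoint address, | |
| * including the direction bit for IN endpoints. */ | |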
2068 | endp = usb_pipeendpoint(stalled_pipe); | |
2069 | if (usb_pipein(stalled_pipe)) | |
2070 | endp |= USB_DIR_IN; | |
2071 | ||
2072 | cr = &sc->work_cr; | |
2073 | cr->bRequestType = USB_RECIP_ENDPOINT; | |
2074 | cr->bRequest = USB_REQ_CLEAR_FEATURE; | |
2075 | cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT); | |
2076 | cr->wIndex = cpu_to_le16(endp); | |
2077 | cr->wLength = cpu_to_le16(0); | |
2078 | ||
2079 | usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, | |
2080 | (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl); | |
1da177e4 LT |
2081 | sc->work_urb.actual_length = 0; |
2082 | sc->work_urb.error_count = 0; | |
2083 | sc->work_urb.status = 0; | |
2084 | ||
2085 | if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) { | |
2086 | printk(KERN_WARNING | |
2087 | "%s: Unable to submit a probe clear (%d)\n", sc->name, rc); | |
2088 | return rc; | |
2089 | } | |
2090 | ||
2091 | init_timer(&timer); | |
2092 | timer.function = ub_probe_timeout; | |
2093 | timer.data = (unsigned long) &compl; | |
2094 | timer.expires = jiffies + UB_CTRL_TIMEOUT; | |
2095 | add_timer(&timer); | |
2096 | ||
2097 | wait_for_completion(&compl); | |
2098 | ||
2099 | del_timer_sync(&timer); | |
2100 | usb_kill_urb(&sc->work_urb); | |
2101 | ||
2102 | /* reset the endpoint toggle */ | |
2103 | usb_settoggle(sc->dev, endp, usb_pipeout(stalled_pipe), 0); | |
2104 | ||
2105 | return 0; | |
2106 | } | |
2107 | ||
2108 | /* | |
2109 | * Get the pipe settings. | |
2110 | */ | |
2111 | static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev, | |
2112 | struct usb_interface *intf) | |
2113 | { | |
2114 | struct usb_host_interface *altsetting = intf->cur_altsetting; | |
2115 | struct usb_endpoint_descriptor *ep_in = NULL; | |
2116 | struct usb_endpoint_descriptor *ep_out = NULL; | |
2117 | struct usb_endpoint_descriptor *ep; | |
2118 | int i; | |
2119 | ||
2120 | /* | |
2121 | * Find the endpoints we need. | |
2122 | * We are expecting a minimum of 2 endpoints - in and out (bulk). | |
2123 | * We will ignore any others. | |
2124 | */ | |
2125 | for (i = 0; i < altsetting->desc.bNumEndpoints; i++) { | |
2126 | ep = &altsetting->endpoint[i].desc; | |
2127 | ||
2128 | /* Is it a BULK endpoint? */ | |
2129 | if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) | |
2130 | == USB_ENDPOINT_XFER_BULK) { | |
2131 | /* BULK in or out? */ | |
643616e6 PZ |
2132 | if (ep->bEndpointAddress & USB_DIR_IN) { |
2133 | if (ep_in == NULL) | |
2134 | ep_in = ep; | |
2135 | } else { | |
2136 | if (ep_out == NULL) | |
2137 | ep_out = ep; | |
2138 | } | |
1da177e4 LT |
2139 | } |
2140 | } | |
2141 | ||
2142 | if (ep_in == NULL || ep_out == NULL) { | |
f4800078 PZ |
2143 | printk(KERN_NOTICE "%s: failed endpoint check\n", |
2144 | sc->name); | |
2c26c9e6 | 2145 | return -ENODEV; |
1da177e4 LT |
2146 | } |
2147 | ||
2148 | /* Calculate and store the pipe values */ | |
2149 | sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0); | |
2150 | sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0); | |
2151 | sc->send_bulk_pipe = usb_sndbulkpipe(dev, | |
2152 | ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); | |
2153 | sc->recv_bulk_pipe = usb_rcvbulkpipe(dev, | |
2154 | ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); | |
2155 | ||
2156 | return 0; | |
2157 | } | |
2158 | ||
2159 | /* | |
2160 | * Probing is done in the process context, which allows us to cheat | |
2161 | * and not build a state machine for the discovery. | |
2162 | */ | |
2163 | static int ub_probe(struct usb_interface *intf, | |
2164 | const struct usb_device_id *dev_id) | |
2165 | { | |
2166 | struct ub_dev *sc; | |
f4800078 | 2167 | int nluns; |
1da177e4 LT |
2168 | int rc; |
2169 | int i; | |
2170 | ||
a00828e9 PZ |
2171 | if (usb_usual_check_type(dev_id, USB_US_TYPE_UB)) |
2172 | return -ENXIO; | |
2173 | ||
1da177e4 | 2174 | rc = -ENOMEM; |
29da7937 | 2175 | if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL) |
1da177e4 | 2176 | goto err_core; |
65b4fe55 | 2177 | sc->lock = ub_next_lock(); |
f4800078 | 2178 | INIT_LIST_HEAD(&sc->luns); |
1da177e4 LT |
2179 | usb_init_urb(&sc->work_urb); |
2180 | tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); | |
2181 | atomic_set(&sc->poison, 0); | |
c4028958 | 2182 | INIT_WORK(&sc->reset_work, ub_reset_task); |
2c26c9e6 | 2183 | init_waitqueue_head(&sc->reset_wait); |
1da177e4 LT |
2184 | |
2185 | init_timer(&sc->work_timer); | |
2186 | sc->work_timer.data = (unsigned long) sc; | |
2187 | sc->work_timer.function = ub_urb_timeout; | |
2188 | ||
2189 | ub_init_completion(&sc->work_done); | |
2190 | sc->work_done.done = 1; /* A little yuk, but oh well... */ | |
2191 | ||
1da177e4 LT |
2192 | sc->dev = interface_to_usbdev(intf); |
2193 | sc->intf = intf; | |
2194 | // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; | |
1da177e4 LT |
2195 | usb_set_intfdata(intf, sc); |
2196 | usb_get_dev(sc->dev); | |
77ef6c4d PZ |
2197 | /* |
2198 | * Since we give the interface struct to the block level through | |
2199 | * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent | |
2200 | * oopses on close after a disconnect (kernels 2.6.16 and up). | |
2201 | */ | |
2202 | usb_get_intf(sc->intf); | |
1da177e4 | 2203 | |
f4800078 PZ |
2204 | snprintf(sc->name, 12, DRV_NAME "(%d.%d)", |
2205 | sc->dev->bus->busnum, sc->dev->devnum); | |
2206 | ||
1da177e4 LT |
2207 | /* XXX Verify that we can handle the device (from descriptors) */ |
2208 | ||
2c26c9e6 PZ |
2209 | if (ub_get_pipes(sc, sc->dev, intf) != 0) |
2210 | goto err_dev_desc; | |
1da177e4 | 2211 | |
1da177e4 LT |
2212 | /* |
2213 | * At this point, all USB initialization is done, do upper layer. | |
2214 | * We really hate halfway initialized structures, so from the | |
2215 | * invariants perspective, this ub_dev is fully constructed at | |
2216 | * this point. | |
2217 | */ | |
2218 | ||
2219 | /* | |
2220 | * This is needed to clear toggles. It is a problem only if we do | |
2221 | * `rmmod ub && modprobe ub` without disconnects, but we like that. | |
2222 | */ | |
c6c88834 | 2223 | #if 0 /* iPod Mini fails if we do this (big white iPod works) */ |
1da177e4 LT |
2224 | ub_probe_clear_stall(sc, sc->recv_bulk_pipe); |
2225 | ub_probe_clear_stall(sc, sc->send_bulk_pipe); | |
c6c88834 | 2226 | #endif |
1da177e4 LT |
2227 | |
2228 | /* | |
2229 | * The way this is used by the startup code is a little specific. | |
2230 | * A SCSI check causes a USB stall. Our common case code sees it | |
2231 | * and clears the check, after which the device is ready for use. | |
2232 | * But if a check was not present, any command other than | |
2233 | * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE). | |
2234 | * | |
2235 | * If we neglect to clear the SCSI check, the first real command fails | |
2236 | * (which is the capacity readout). We clear that and retry, but why | |
2237 | * cause spurious retries for no reason? | |
2238 | * | |
2239 | * Revalidation may start with its own TEST_UNIT_READY, but that one | |
2240 | * has to succeed, so we clear checks with an additional one here. | |
2241 | * In any case it's not our business how revalidation is implemented. | |
2242 | */ | |
b5600339 | 2243 | for (i = 0; i < 3; i++) { /* Retries for the schwag key from KS'04 */ |
f4800078 | 2244 | if ((rc = ub_sync_tur(sc, NULL)) <= 0) break; |
1da177e4 LT |
2245 | if (rc != 0x6) break; |
2246 | msleep(10); | |
2247 | } | |
2248 | ||
f4800078 PZ |
2249 | nluns = 1; |
2250 | for (i = 0; i < 3; i++) { | |
11a223ae | 2251 | if ((rc = ub_sync_getmaxlun(sc)) < 0) |
f4800078 | 2252 | break; |
f4800078 PZ |
2253 | if (rc != 0) { |
2254 | nluns = rc; | |
2255 | break; | |
2256 | } | |
9f793d2c | 2257 | msleep(100); |
f4800078 | 2258 | } |
1da177e4 | 2259 | |
f4800078 PZ |
2260 | for (i = 0; i < nluns; i++) { |
2261 | ub_probe_lun(sc, i); | |
2262 | } | |
2263 | return 0; | |
2264 | ||
2c26c9e6 | 2265 | err_dev_desc: |
f4800078 | 2266 | usb_set_intfdata(intf, NULL); |
77ef6c4d | 2267 | usb_put_intf(sc->intf); |
f4800078 PZ |
2268 | usb_put_dev(sc->dev); |
2269 | kfree(sc); | |
2270 | err_core: | |
2271 | return rc; | |
2272 | } | |
2273 | ||
2274 | static int ub_probe_lun(struct ub_dev *sc, int lnum) | |
2275 | { | |
2276 | struct ub_lun *lun; | |
165125e1 | 2277 | struct request_queue *q; |
f4800078 PZ |
2278 | struct gendisk *disk; |
2279 | int rc; | |
2280 | ||
2281 | rc = -ENOMEM; | |
29da7937 | 2282 | if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL) |
f4800078 | 2283 | goto err_alloc; |
f4800078 PZ |
2284 | lun->num = lnum; |
2285 | ||
2286 | rc = -ENOSR; | |
2287 | if ((lun->id = ub_id_get()) == -1) | |
2288 | goto err_id; | |
2289 | ||
2290 | lun->udev = sc; | |
f4800078 PZ |
2291 | |
2292 | snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)", | |
2293 | lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num); | |
2294 | ||
2295 | lun->removable = 1; /* XXX Query this from the device */ | |
2296 | lun->changed = 1; /* ub_revalidate clears only */ | |
f4800078 | 2297 | ub_revalidate(sc, lun); |
1da177e4 | 2298 | |
1da177e4 | 2299 | rc = -ENOMEM; |
4fb729f5 | 2300 | if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL) |
1da177e4 LT |
2301 | goto err_diskalloc; |
2302 | ||
f4800078 | 2303 | sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a'); |
1da177e4 | 2304 | disk->major = UB_MAJOR; |
4fb729f5 | 2305 | disk->first_minor = lun->id * UB_PARTS_PER_LUN; |
1da177e4 | 2306 | disk->fops = &ub_bd_fops; |
f4800078 | 2307 | disk->private_data = lun; |
64bd8453 | 2308 | disk->driverfs_dev = &sc->intf->dev; |
1da177e4 LT |
2309 | |
2310 | rc = -ENOMEM; | |
65b4fe55 | 2311 | if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL) |
1da177e4 LT |
2312 | goto err_blkqinit; |
2313 | ||
2314 | disk->queue = q; | |
2315 | ||
f4800078 | 2316 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); |
1da177e4 LT |
2317 | blk_queue_max_hw_segments(q, UB_MAX_REQ_SG); |
2318 | blk_queue_max_phys_segments(q, UB_MAX_REQ_SG); | |
f4800078 | 2319 | blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */ |
1da177e4 | 2320 | blk_queue_max_sectors(q, UB_MAX_SECTORS); |
f4800078 | 2321 | blk_queue_hardsect_size(q, lun->capacity.bsize); |
1da177e4 | 2322 | |
688e9fb1 | 2323 | lun->disk = disk; |
f4800078 | 2324 | q->queuedata = lun; |
688e9fb1 | 2325 | list_add(&lun->link, &sc->luns); |
1da177e4 | 2326 | |
f4800078 PZ |
2327 | set_capacity(disk, lun->capacity.nsec); |
2328 | if (lun->removable) | |
1da177e4 LT |
2329 | disk->flags |= GENHD_FL_REMOVABLE; |
2330 | ||
2331 | add_disk(disk); | |
2332 | ||
2333 | return 0; | |
2334 | ||
2335 | err_blkqinit: | |
2336 | put_disk(disk); | |
2337 | err_diskalloc: | |
f4800078 | 2338 | ub_id_put(lun->id); |
1da177e4 | 2339 | err_id: |
f4800078 PZ |
2340 | kfree(lun); |
2341 | err_alloc: | |
1da177e4 LT |
2342 | return rc; |
2343 | } | |
2344 | ||
2345 | static void ub_disconnect(struct usb_interface *intf) | |
2346 | { | |
2347 | struct ub_dev *sc = usb_get_intfdata(intf); | |
f4800078 | 2348 | struct ub_lun *lun; |
1da177e4 LT |
2349 | unsigned long flags; |
2350 | ||
2351 | /* | |
2352 | * Prevent ub_bd_release from pulling the rug from under us. | |
2353 | * XXX This is starting to look like a kref. | |
2354 | * XXX Why not take this ref at probe time? | |
2355 | */ | |
2356 | spin_lock_irqsave(&ub_lock, flags); | |
2357 | sc->openc++; | |
2358 | spin_unlock_irqrestore(&ub_lock, flags); | |
2359 | ||
2360 | /* | |
2361 | * Fence stall clearings, operations triggered by unlinkings and so on. | |
2362 | * We do not attempt to unlink any URBs, because we do not trust the | |
2363 | * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway. | |
2364 | */ | |
2365 | atomic_set(&sc->poison, 1); | |
2366 | ||
2c26c9e6 PZ |
2367 | /* |
2368 | * Wait for reset to end, if any. | |
2369 | */ | |
2370 | wait_event(sc->reset_wait, !sc->reset); | |
2371 | ||
1da177e4 LT |
2372 | /* |
2373 | * Blow away queued commands. | |
2374 | * | |
2375 | * Actually, this never works, because before we get here | |
2376 | * the HCD terminates outstanding URB(s). It causes our | |
2377 | * SCSI command queue to advance, commands fail to submit, | |
2378 | * and the whole queue drains. So, we just use this code to | |
2379 | * print warnings. | |
2380 | */ | |
65b4fe55 | 2381 | spin_lock_irqsave(sc->lock, flags); |
1da177e4 LT |
2382 | { |
2383 | struct ub_scsi_cmd *cmd; | |
2384 | int cnt = 0; | |
2c26c9e6 | 2385 | while ((cmd = ub_cmdq_peek(sc)) != NULL) { |
1da177e4 LT |
2386 | cmd->error = -ENOTCONN; |
2387 | cmd->state = UB_CMDST_DONE; | |
1da177e4 LT |
2388 | ub_cmdq_pop(sc); |
2389 | (*cmd->done)(sc, cmd); | |
2390 | cnt++; | |
2391 | } | |
2392 | if (cnt != 0) { | |
2393 | printk(KERN_WARNING "%s: " | |
2394 | "%d was queued after shutdown\n", sc->name, cnt); | |
2395 | } | |
2396 | } | |
65b4fe55 | 2397 | spin_unlock_irqrestore(sc->lock, flags); |
1da177e4 LT |
2398 | |
2399 | /* | |
2400 | * Unregister the upper layer. | |
2401 | */ | |
a69228de | 2402 | list_for_each_entry(lun, &sc->luns, link) { |
688e9fb1 | 2403 | del_gendisk(lun->disk); |
f4800078 PZ |
2404 | /* |
2405 | * I wish I could do: | |
2406 | * set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); | |
2407 | * As it is, we rely on our internal poisoning and let | |
2408 | * the upper levels spin furiously, failing all the I/O. | |
2409 | */ | |
2410 | } | |
1da177e4 LT |
2411 | |
2412 | /* | |
1da177e4 LT |
2413 | * Testing for -EINPROGRESS is always a bug, so we are bending |
2414 | * the rules a little. | |
2415 | */ | |
65b4fe55 | 2416 | spin_lock_irqsave(sc->lock, flags); |
1da177e4 LT |
2417 | if (sc->work_urb.status == -EINPROGRESS) { /* janitors: ignore */ |
2418 | printk(KERN_WARNING "%s: " | |
2419 | "URB is active after disconnect\n", sc->name); | |
2420 | } | |
65b4fe55 | 2421 | spin_unlock_irqrestore(sc->lock, flags); |
1da177e4 LT |
2422 | |
2423 | /* | |
2424 | * There is virtually no chance that another CPU is still running this | |
2425 | * long after ub_urb_complete should have called del_timer, but only if | |
2426 | * the HCD didn't forget to deliver a callback on unlink. | |
2427 | */ | |
2428 | del_timer_sync(&sc->work_timer); | |
2429 | ||
2430 | /* | |
2431 | * At this point there must be no commands coming from anyone | |
2432 | * and no URBs left in transit. | |
2433 | */ | |
2434 | ||
1da177e4 LT |
2435 | ub_put(sc); |
2436 | } | |
2437 | ||
2438 | static struct usb_driver ub_driver = { | |
1da177e4 LT |
2439 | .name = "ub", |
2440 | .probe = ub_probe, | |
2441 | .disconnect = ub_disconnect, | |
2442 | .id_table = ub_usb_ids, | |
2443 | }; | |
2444 | ||
2445 | static int __init ub_init(void) | |
2446 | { | |
2447 | int rc; | |
65b4fe55 PZ |
2448 | int i; |
2449 | ||
2450 | for (i = 0; i < UB_QLOCK_NUM; i++) | |
2451 | spin_lock_init(&ub_qlockv[i]); | |
1da177e4 | 2452 | |
1da177e4 LT |
2453 | if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0) |
2454 | goto err_regblkdev; | |
1da177e4 LT |
2455 | |
2456 | if ((rc = usb_register(&ub_driver)) != 0) | |
2457 | goto err_register; | |
2458 | ||
a00828e9 | 2459 | usb_usual_set_present(USB_US_TYPE_UB); |
1da177e4 LT |
2460 | return 0; |
2461 | ||
2462 | err_register: | |
1da177e4 LT |
2463 | unregister_blkdev(UB_MAJOR, DRV_NAME); |
2464 | err_regblkdev: | |
2465 | return rc; | |
2466 | } | |
2467 | ||
2468 | static void __exit ub_exit(void) | |
2469 | { | |
2470 | usb_deregister(&ub_driver); | |
2471 | ||
1da177e4 | 2472 | unregister_blkdev(UB_MAJOR, DRV_NAME); |
a00828e9 | 2473 | usb_usual_clear_present(USB_US_TYPE_UB); |
1da177e4 LT |
2474 | } |
2475 | ||
2476 | module_init(ub_init); | |
2477 | module_exit(ub_exit); | |
2478 | ||
2479 | MODULE_LICENSE("GPL"); |