#ifndef _SCSI_SCSI_HOST_H
#define _SCSI_SCSI_HOST_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

struct block_device;
struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
struct scsi_target;
struct Scsi_Host;
struct scsi_host_cmd_pool;
struct scsi_transport_template;


/*
 * The various choices mean:
 * NONE: Self evident.  Host adapter is not capable of scatter-gather.
 * ALL:  Means that the host adapter module can do scatter-gather,
 *       and that there is no limit to the size of the table to which
 *       we scatter/gather data.
 * Anything else:  Indicates the maximum number of chains that can be
 *       used in one scatter-gather request.
 */
#define SG_NONE 0
#define SG_ALL 0xff


#define DISABLE_CLUSTERING 0
#define ENABLE_CLUSTERING 1

enum scsi_eh_timer_return {
	EH_NOT_HANDLED,
	EH_HANDLED,
	EH_RESET_TIMER,
};


struct scsi_host_template {
	struct module *module;
	const char *name;

	/*
	 * Used to initialize old-style drivers.  For new-style drivers
	 * just perform all work in your module initialization function.
	 *
	 * Status: OBSOLETE
	 */
	int (* detect)(struct scsi_host_template *);

	/*
	 * Used as unload callback for hosts with old-style drivers.
	 *
	 * Status: OBSOLETE
	 */
	int (* release)(struct Scsi_Host *);

	/*
	 * The info function will return whatever useful information the
	 * developer sees fit.  If not provided, then the name field will
	 * be used instead.
	 *
	 * Status: OPTIONAL
	 */
	const char *(* info)(struct Scsi_Host *);

	/*
	 * Ioctl interface
	 *
	 * Status: OPTIONAL
	 */
	int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);


#ifdef CONFIG_COMPAT
	/*
	 * Compat handler.  Handles the 32-bit ABI.
	 * When an unknown ioctl is passed, return -ENOIOCTLCMD.
	 *
	 * Status: OPTIONAL
	 */
	int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
#endif

	/*
	 * The queuecommand function is used to queue up a scsi
	 * command block to the LLDD.  When the driver has finished
	 * processing the command, the done callback is invoked.
	 *
	 * If queuecommand returns 0, then the HBA has accepted the
	 * command.  The done() function must be called on the command
	 * when the driver has finished with it.  (You may call done on
	 * the command before queuecommand returns, but in this case you
	 * *must* return 0 from queuecommand.)
	 *
	 * Queuecommand may also reject the command, in which case it may
	 * not touch the command and must not call done() for it.
	 *
	 * There are two possible rejection returns:
	 *
	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
	 *   allow commands to other devices serviced by this host.
	 *
	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
	 *   host temporarily.
	 *
	 * For compatibility, any other non-zero return is treated the
	 * same as SCSI_MLQUEUE_HOST_BUSY.
	 *
	 * NOTE: "temporarily" means either until the next command for
	 * this device/host completes, or a period of time determined by
	 * I/O pressure in the system if there are no other outstanding
	 * commands.
	 *
	 * A minimal sketch of a queuecommand implementation follows this
	 * structure definition.
	 *
	 * STATUS: REQUIRED
	 */
	int (* queuecommand)(struct scsi_cmnd *,
			     void (*done)(struct scsi_cmnd *));

	/*
	 * This is an error handling strategy routine.  You don't need to
	 * define one of these if you don't want to - there is a default
	 * routine that is present that should work in most cases.  For those
	 * driver authors that have the inclination and ability to write their
	 * own strategy routine, this is where it is specified.  Note - the
	 * strategy routine is *ALWAYS* run in the context of the kernel eh
	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
	 * handler when you execute this, and you are also guaranteed to
	 * *NOT* have any other commands being queued while you are in the
	 * strategy routine.  When you return from this function, operations
	 * return to normal.
	 *
	 * See scsi_error.c scsi_unjam_host for additional comments about
	 * what this function should and should not be attempting to do.
	 *
	 * Status: REQUIRED (at least one of them)
	 */
	int (* eh_strategy_handler)(struct Scsi_Host *);
	int (* eh_abort_handler)(struct scsi_cmnd *);
	int (* eh_device_reset_handler)(struct scsi_cmnd *);
	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
	int (* eh_host_reset_handler)(struct scsi_cmnd *);

	/*
	 * This is an optional routine to notify the host that the scsi
	 * timer just fired.  The returns tell the timer routine what to
	 * do about this:
	 *
	 *   EH_HANDLED:     I fixed the error, please complete the command
	 *   EH_RESET_TIMER: I need more time, reset the timer and
	 *                   begin counting again
	 *   EH_NOT_HANDLED: Begin normal error recovery
	 *
	 * A short sketch follows this structure definition.
	 *
	 * Status: OPTIONAL
	 */
	enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);

	/*
	 * Before the mid layer attempts to scan for a new device where none
	 * currently exists, it will call this entry in your driver.  Should
	 * your driver need to allocate any structs or perform any other init
	 * items in order to send commands to a currently unused target/lun
	 * combo, then this is where you can perform those allocations.  This
	 * is specifically so that drivers won't have to perform any kind of
	 * "is this a new device" checks in their queuecommand routine,
	 * thereby making the hot path a bit quicker.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Deallocation: If we didn't find any devices at this ID, you will
	 * get an immediate call to slave_destroy().  If we find something
	 * here then you will get a call to slave_configure(), then the
	 * device will be used for however long it is kept around, then when
	 * the device is removed from the system (or possibly at reboot
	 * time), you will then get a call to slave_destroy().  This is
	 * assuming you implement slave_configure and slave_destroy.
	 * However, if you allocate memory and hang it off the device struct,
	 * then you must implement the slave_destroy() routine at a minimum
	 * in order to avoid leaking memory each time a device is torn down.
	 * A sketch of this lifecycle follows this structure definition.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_alloc)(struct scsi_device *);

	/*
	 * Once the device has responded to an INQUIRY and we know the
	 * device is online, we call into the low level driver with the
	 * struct scsi_device *.  If the low level device driver implements
	 * this function, it *must* perform the task of setting the queue
	 * depth on the device.  All other tasks are optional and depend
	 * on what the driver supports and various implementation details.
	 *
	 * Things currently recommended to be handled at this time include:
	 *
	 * 1.  Setting the device queue depth.  Proper setting of this is
	 *     described in the comments for scsi_adjust_queue_depth.
	 * 2.  Determining if the device supports the various synchronous
	 *     negotiation protocols.  The device struct will already have
	 *     responded to INQUIRY and the results of the standard items
	 *     will have been shoved into the various device flag bits, eg.
	 *     device->sdtr will be true if the device supports SDTR messages.
	 * 3.  Allocating command structs that the device will need.
	 * 4.  Setting the default timeout on this device (if needed).
	 * 5.  Anything else the low level driver might want to do on a device
	 *     specific setup basis...
	 * 6.  Return 0 on success, non-0 on error.  The device will be marked
	 *     as offline on error so that no access will occur.  If you return
	 *     non-0, your slave_destroy routine will never get called for this
	 *     device, so don't leave any loose memory hanging around; clean
	 *     up after yourself before returning non-0.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_configure)(struct scsi_device *);

	/*
	 * Immediately prior to deallocating the device and after all activity
	 * has ceased the mid layer calls this point so that the low level
	 * driver may completely detach itself from the scsi device and vice
	 * versa.  The low level driver is responsible for freeing any memory
	 * it allocated in the slave_alloc or slave_configure calls.
	 *
	 * Status: OPTIONAL
	 */
	void (* slave_destroy)(struct scsi_device *);

	/*
	 * Before the mid layer attempts to scan for a new device attached
	 * to a target where no target currently exists, it will call this
	 * entry in your driver.  Should your driver need to allocate any
	 * structs or perform any other init items in order to send commands
	 * to a currently unused target, then this is where you can perform
	 * those allocations.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Status: OPTIONAL
	 */
	int (* target_alloc)(struct scsi_target *);

	/*
	 * Immediately prior to deallocating the target structure, and
	 * after all activity to attached scsi devices has ceased, the
	 * midlayer calls this point so that the driver may deallocate
	 * and terminate any references to the target.
	 *
	 * Status: OPTIONAL
	 */
	void (* target_destroy)(struct scsi_target *);

	/*
	 * Fill in this function to allow the queue depth of this host
	 * to be changeable (on a per-device basis).  Returns either
	 * the current queue depth setting (which may differ from what
	 * was passed in) or an error.  An error should only be
	 * returned if the requested depth is legal but the driver was
	 * unable to set it.  If the requested depth is illegal, the
	 * driver should set and return the closest legal queue depth.
	 */
	int (* change_queue_depth)(struct scsi_device *, int);

	/*
	 * Fill in this function to allow the changing of tag types
	 * (this also allows the enabling/disabling of tag command
	 * queueing).  An error should only be returned if something
	 * went wrong in the driver while trying to set the tag type.
	 * If the driver doesn't support the requested tag type, then
	 * it should set the closest type it does support without
	 * returning an error.  Returns the actual tag type set.
	 */
	int (* change_queue_type)(struct scsi_device *, int);

	/*
	 * This function determines the BIOS parameters for a given
	 * hard disk.  These tend to be numbers that are made up by
	 * the host adapter.  Parameters:
	 * size, device, list (heads, sectors, cylinders)
	 * A sketch follows this structure definition.
	 *
	 * Status: OPTIONAL
	 */
	int (* bios_param)(struct scsi_device *, struct block_device *,
			sector_t, int []);

	/*
	 * Can be used to export driver statistics and other information
	 * to the world outside the kernel (i.e. userspace), and it also
	 * provides an interface to feed the driver with information.
	 *
	 * Status: OBSOLETE
	 */
	int (*proc_info)(struct Scsi_Host *, char *, char **, off_t, int, int);

	/*
	 * suspend support
	 */
	int (*resume)(struct scsi_device *);
	int (*suspend)(struct scsi_device *);

	/*
	 * Name of proc directory
	 */
	char *proc_name;

	/*
	 * Used to store the procfs directory if a driver implements the
	 * proc_info method.
	 */
	struct proc_dir_entry *proc_dir;

	/*
	 * This determines if we will use a non-interrupt driven
	 * or an interrupt driven scheme.  It is set to the maximum number
	 * of simultaneous commands a given host adapter will accept.
	 */
	int can_queue;

	/*
	 * In many instances, especially where disconnect / reconnect are
	 * supported, our host also has an ID on the SCSI bus.  If this is
	 * the case, then it must be reserved.  Please set this_id to -1 if
	 * your setup is in single initiator mode, and the host lacks an
	 * ID.
	 */
	int this_id;

	/*
	 * This determines the degree to which the host adapter is capable
	 * of scatter-gather.
	 */
	unsigned short sg_tablesize;

	/*
	 * Set this if the host adapter has limitations besides the
	 * segment count.
	 */
	unsigned short max_sectors;

	/*
	 * DMA scatter-gather segment boundary limit.  A segment crossing
	 * this boundary will be split in two.
	 */
	unsigned long dma_boundary;

	/*
	 * This specifies "machine infinity" for host templates which don't
	 * limit the transfer size.  Note this limit represents an absolute
	 * maximum, and may be over the transfer limits allowed for
	 * individual devices (e.g. 256 for SCSI-1).
	 */
#define SCSI_DEFAULT_MAX_SECTORS	1024

	/*
	 * True if this host adapter can make good use of linked commands.
	 * This will allow more than one command to be queued to a given
	 * unit on a given host.  Set this to the maximum number of command
	 * blocks to be provided for each device.  Set this to 1 for one
	 * command block per lun, 2 for two, etc.  Do not set this to 0.
	 * You should make sure that the host adapter will do the right thing
	 * before you try setting this above 1.
	 */
	short cmd_per_lun;

	/*
	 * present contains a counter indicating how many boards of this
	 * type were found when we did the scan.
	 */
	unsigned char present;

	/*
	 * True if this host adapter uses unchecked DMA onto an ISA bus.
	 */
	unsigned unchecked_isa_dma:1;

	/*
	 * True if this host adapter can make good use of clustering.
	 * I originally thought that if the tablesize was large that it
	 * was a waste of CPU cycles to prepare a cluster list, but
	 * it works out that the Buslogic is faster if you use a smaller
	 * number of segments (i.e. use clustering).  I guess it is
	 * inefficient.
	 */
	unsigned use_clustering:1;

	/*
	 * True for emulated SCSI host adapters (e.g. ATAPI).
	 */
	unsigned emulated:1;

	/*
	 * True if the low-level driver performs its own reset-settle delays.
	 */
	unsigned skip_settle_delay:1;

398 | ||
399 | /* | |
400 | * ordered write support | |
401 | */ | |
1da177e4 LT |
402 | unsigned ordered_tag:1; |
403 | ||
404 | /* | |
405 | * Countdown for host blocking with no commands outstanding | |
406 | */ | |
407 | unsigned int max_host_blocked; | |
408 | ||
409 | /* | |
410 | * Default value for the blocking. If the queue is empty, | |
411 | * host_blocked counts down in the request_fn until it restarts | |
412 | * host operations as zero is reached. | |
413 | * | |
414 | * FIXME: This should probably be a value in the template | |
415 | */ | |
416 | #define SCSI_DEFAULT_HOST_BLOCKED 7 | |
417 | ||
418 | /* | |
419 | * Pointer to the sysfs class properties for this host, NULL terminated. | |
420 | */ | |
421 | struct class_device_attribute **shost_attrs; | |
422 | ||
423 | /* | |
424 | * Pointer to the SCSI device properties for this host, NULL terminated. | |
425 | */ | |
426 | struct device_attribute **sdev_attrs; | |
427 | ||
428 | /* | |
429 | * List of hosts per template. | |
430 | * | |
431 | * This is only for use by scsi_module.c for legacy templates. | |
432 | * For these access to it is synchronized implicitly by | |
433 | * module_init/module_exit. | |
434 | */ | |
435 | struct list_head legacy_hosts; | |
436 | }; | |
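
/*
 * Editor's illustrative sketch (not part of the original header): a minimal
 * queuecommand implementation for a hypothetical "exdrv" driver, wrapped in
 * #if 0 so it is never compiled.  struct exdrv_host, exdrv_hw_busy() and
 * exdrv_start_io() are invented names; a real driver would include
 * <scsi/scsi.h>, <scsi/scsi_cmnd.h> and <scsi/scsi_device.h> in its .c file.
 */
#if 0
static int exdrv_queuecommand(struct scsi_cmnd *cmd,
			      void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *shost = cmd->device->host;
	struct exdrv_host *eh = (struct exdrv_host *)shost->hostdata;

	if (exdrv_hw_busy(eh))
		return SCSI_MLQUEUE_HOST_BUSY;	/* block the whole host */

	cmd->scsi_done = done;			/* remember completion callback */
	if (exdrv_start_io(eh, cmd))
		return SCSI_MLQUEUE_DEVICE_BUSY; /* block just this device */

	return 0;				/* command accepted */
}
#endif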
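
/*
 * Editor's illustrative sketch (not part of the original header): an
 * eh_timed_out hook for the same hypothetical driver.  exdrv_cmd_in_flight()
 * is an invented helper that asks the hardware whether it still owns the
 * command.  Never compiled (#if 0).
 */
#if 0
static enum scsi_eh_timer_return exdrv_eh_timed_out(struct scsi_cmnd *cmd)
{
	struct exdrv_host *eh =
		(struct exdrv_host *)cmd->device->host->hostdata;

	if (exdrv_cmd_in_flight(eh, cmd))
		return EH_RESET_TIMER;	/* still working; ask for more time */

	return EH_NOT_HANDLED;		/* let normal error recovery begin */
}
#endif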
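
/*
 * Editor's illustrative sketch (not part of the original header): the
 * slave_alloc / slave_configure / slave_destroy lifecycle described above.
 * struct exdrv_lun and EXDRV_QUEUE_DEPTH are invented; scsi_adjust_queue_depth()
 * and the MSG_*_TAG constants come from <scsi/scsi_device.h> and <scsi/scsi.h>,
 * which a real driver would include.  Never compiled (#if 0).
 */
#if 0
#define EXDRV_QUEUE_DEPTH	16	/* made-up default depth */

struct exdrv_lun {			/* invented per-LUN bookkeeping */
	unsigned int queued;
};

static int exdrv_slave_alloc(struct scsi_device *sdev)
{
	struct exdrv_lun *lp = kmalloc(sizeof(*lp), GFP_KERNEL);

	if (!lp)
		return -ENOMEM;
	memset(lp, 0, sizeof(*lp));
	sdev->hostdata = lp;		/* per-LUN private data */
	return 0;
}

static int exdrv_slave_configure(struct scsi_device *sdev)
{
	/* The required part: pick a queue depth for this device. */
	if (sdev->tagged_supported)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
					EXDRV_QUEUE_DEPTH);
	else
		scsi_adjust_queue_depth(sdev, 0, 1);
	return 0;
}

static void exdrv_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);		/* undo slave_alloc() */
	sdev->hostdata = NULL;
}
#endif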
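
/*
 * Editor's illustrative sketch (not part of the original header): a
 * bios_param hook filling in the (heads, sectors, cylinders) list with a
 * commonly used fake 64-head / 32-sector geometry.  Never compiled (#if 0).
 */
#if 0
static int exdrv_bios_param(struct scsi_device *sdev,
			    struct block_device *bdev,
			    sector_t capacity, int geom[])
{
	geom[0] = 64;					/* heads */
	geom[1] = 32;					/* sectors per track */
	geom[2] = (unsigned long)capacity / (64 * 32);	/* cylinders */
	return 0;
}
#endif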

/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ASCII descriptions) and the state model enforcer:
 * scsi_host_set_state().
 */
enum scsi_host_state {
	SHOST_CREATED = 1,
	SHOST_RUNNING,
	SHOST_CANCEL,
	SHOST_DEL,
	SHOST_RECOVERY,
	SHOST_CANCEL_RECOVERY,
	SHOST_DEL_RECOVERY,
};

struct Scsi_Host {
	/*
	 * __devices is protected by the host_lock, but you should
	 * usually use scsi_device_lookup / shost_for_each_device
	 * to access it and not worry about the locking yourself.
	 * In the rare case of being in irq context you can use
	 * their __ prefixed variants with the lock held.  NEVER
	 * access this list directly from a driver.
	 */
	struct list_head	__devices;
	struct list_head	__targets;

	struct scsi_host_cmd_pool *cmd_pool;
	spinlock_t		free_list_lock;
	struct list_head	free_list;	/* backup store of cmd structs */
	struct list_head	starved_list;

	spinlock_t		default_lock;
	spinlock_t		*host_lock;

	struct mutex		scan_mutex;	/* serialize scanning activity */

	struct list_head	eh_cmd_q;
	struct task_struct	*ehandler;	/* Error recovery thread. */
	struct completion	*eh_action;	/* Wait for specific actions on
						   the host. */
	wait_queue_head_t	host_wait;
	struct scsi_host_template *hostt;
	struct scsi_transport_template *transportt;

	/*
	 * The following two fields are protected with host_lock;
	 * however, eh routines can safely access during eh processing
	 * without acquiring the lock.
	 */
	unsigned int host_busy;		/* commands actually active on low-level */
	unsigned int host_failed;	/* commands that failed. */

	unsigned short host_no;	/* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
	int resetting;		/* if set, last_reset is a valid value */
	unsigned long last_reset;

	/*
	 * These three parameters can be used to allow for wide scsi,
	 * and for host adapters that support multiple busses.
	 * The first two should be set to 1 more than the actual max id
	 * or lun (i.e. 8 for normal systems).
	 */
	unsigned int max_id;
	unsigned int max_lun;
	unsigned int max_channel;

	/*
	 * This is a unique identifier that must be assigned so that we
	 * have some way of identifying each detected host adapter properly
	 * and uniquely.  For hosts that do not support more than one card
	 * in the system at one time, this does not need to be set.  It is
	 * initialized to 0 in scsi_register.
	 */
	unsigned int unique_id;

	/*
	 * The maximum length of SCSI commands that this host can accept.
	 * Probably 12 for most host adapters, but could be 16 for others.
	 * For drivers that don't set this field, a value of 12 is
	 * assumed.  I am leaving this as a number rather than a bit
	 * because you never know what subsequent SCSI standards might do
	 * (i.e. could there be a 20-byte or a 24-byte command a few years
	 * down the road?).
	 */
	unsigned char max_cmd_len;

	int this_id;
	int can_queue;
	short cmd_per_lun;
	short unsigned int sg_tablesize;
	short unsigned int max_sectors;
	unsigned long dma_boundary;
	/*
	 * Used to assign serial numbers to the cmds.
	 * Protected by the host lock.
	 */
	unsigned long cmd_serial_number, cmd_pid;

	unsigned unchecked_isa_dma:1;
	unsigned use_clustering:1;
	unsigned use_blk_tcq:1;

	/*
	 * Host has requested that no further requests come through for the
	 * time being.
	 */
	unsigned host_self_blocked:1;

	/*
	 * Host uses correct SCSI ordering not PC ordering.  The bit is
	 * set for the minority of drivers whose authors actually read
	 * the spec ;)
	 */
	unsigned reverse_ordering:1;

	/*
	 * ordered write support
	 */
	unsigned ordered_flush:1;
	unsigned ordered_tag:1;

	/*
	 * Optional work queue to be utilized by the transport
	 */
	char work_q_name[KOBJ_NAME_LEN];
	struct workqueue_struct *work_q;

	/*
	 * Host has rejected a command because it was busy.
	 */
	unsigned int host_blocked;

	/*
	 * Value host_blocked counts down from
	 */
	unsigned int max_host_blocked;

	/* legacy crap */
	unsigned long base;
	unsigned long io_port;
	unsigned char n_io_port;
	unsigned char dma_channel;
	unsigned int irq;


	enum scsi_host_state shost_state;

	/* ldm bits */
	struct device		shost_gendev;
	struct class_device	shost_classdev;

	/*
	 * List of hosts per template.
	 *
	 * This is only for use by scsi_module.c for legacy templates.
	 * For these access to it is synchronized implicitly by
	 * module_init/module_exit.
	 */
	struct list_head sht_legacy_list;

	/*
	 * Points to the transport data (if any) which is allocated
	 * separately.
	 */
	void *shost_data;

	/*
	 * We should ensure that this is aligned, both for better performance
	 * and also because some compilers (m68k) don't automatically force
	 * alignment to a long boundary.  A sketch of how drivers typically
	 * use this area follows the structure definition.
	 */
	unsigned long hostdata[0]  /* Used for storage of host specific stuff */
		__attribute__ ((aligned (sizeof(unsigned long))));
};
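
/*
 * Editor's illustrative sketch (not part of the original header): how a
 * hypothetical driver typically uses the hostdata[] area above.  The private
 * area is sized by the second argument of scsi_host_alloc(); struct exdrv_host
 * and its members are invented examples.  Never compiled (#if 0).
 */
#if 0
struct exdrv_host {
	void __iomem	*regs;		/* made-up register window */
	unsigned int	outstanding;	/* made-up command counter */
};

static inline struct exdrv_host *exdrv_priv(struct Scsi_Host *shost)
{
	/* hostdata[] is the driver-private area appended to Scsi_Host */
	return (struct exdrv_host *)shost->hostdata;
}
#endif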

#define class_to_shost(d)	\
	container_of(d, struct Scsi_Host, shost_classdev)

#define shost_printk(prefix, shost, fmt, a...)	\
	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)


int scsi_is_host_device(const struct device *);

static inline struct Scsi_Host *dev_to_shost(struct device *dev)
{
	while (!scsi_is_host_device(dev)) {
		if (!dev->parent)
			return NULL;
		dev = dev->parent;
	}
	return container_of(dev, struct Scsi_Host, shost_gendev);
}

static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RECOVERY ||
		shost->shost_state == SHOST_CANCEL_RECOVERY ||
		shost->shost_state == SHOST_DEL_RECOVERY;
}

extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
extern int __must_check scsi_add_host(struct Scsi_Host *, struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);

extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *);
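
/*
 * Editor's illustrative sketch (not part of the original header): the usual
 * allocate -> add -> scan -> remove -> put sequence built from the
 * declarations above, for a hypothetical driver.  The exdrv_* handlers and
 * struct exdrv_host are invented names assumed to be defined elsewhere;
 * THIS_MODULE comes from <linux/module.h>.  Never compiled (#if 0).
 */
#if 0
static struct scsi_host_template exdrv_template = {
	.module			= THIS_MODULE,
	.name			= "exdrv",
	.proc_name		= "exdrv",
	.queuecommand		= exdrv_queuecommand,
	.eh_host_reset_handler	= exdrv_host_reset,
	.slave_alloc		= exdrv_slave_alloc,
	.slave_configure	= exdrv_slave_configure,
	.slave_destroy		= exdrv_slave_destroy,
	.can_queue		= 32,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.cmd_per_lun		= 2,
	.use_clustering		= ENABLE_CLUSTERING,
};

static int exdrv_probe(struct device *dev)
{
	struct Scsi_Host *shost;
	int err;

	shost = scsi_host_alloc(&exdrv_template, sizeof(struct exdrv_host));
	if (!shost)
		return -ENOMEM;

	shost->max_id = 16;
	shost->max_lun = 8;
	shost->max_channel = 0;

	err = scsi_add_host(shost, dev);	/* register with the midlayer */
	if (err) {
		scsi_host_put(shost);
		return err;
	}
	scsi_scan_host(shost);			/* kick off device discovery */
	return 0;
}

static void exdrv_remove(struct Scsi_Host *shost)
{
	scsi_remove_host(shost);		/* tear down attached devices */
	scsi_host_put(shost);			/* drop the allocation reference */
}
#endif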

static inline void scsi_assign_lock(struct Scsi_Host *shost, spinlock_t *lock)
{
	shost->host_lock = lock;
}

static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
	return shost->shost_gendev.parent;
}

/**
 * scsi_host_scan_allowed - Is scanning of this host allowed
 * @shost:	Pointer to Scsi_Host.
 **/
static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RUNNING;
}

extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);

struct class_container;
/*
 * These two functions are used to allocate and free a pseudo device
 * which will connect to the host adapter itself rather than any
 * physical device.  You must deallocate when you are done with the
 * thing.  This pseudo-device isn't a real physical device and won't
 * be available to any high-level drivers.
 */
extern void scsi_free_host_dev(struct scsi_device *);
extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);

/* legacy interfaces */
extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
extern void scsi_unregister(struct Scsi_Host *);
extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);

#endif /* _SCSI_SCSI_HOST_H */