1 /* -*- mode: c; c-basic-offset: 8 -*- */
3 /* NCR (or Symbios) 53c700 and 53c700-66 Driver
5 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
6 **-----------------------------------------------------------------------------
8 ** This program is free software; you can redistribute it and/or modify
9 ** it under the terms of the GNU General Public License as published by
10 ** the Free Software Foundation; either version 2 of the License, or
11 ** (at your option) any later version.
13 ** This program is distributed in the hope that it will be useful,
14 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 ** GNU General Public License for more details.
18 ** You should have received a copy of the GNU General Public License
19 ** along with this program; if not, write to the Free Software
20 ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 **-----------------------------------------------------------------------------
27 * This driver is designed exclusively for these chips (virtually the
28 * earliest of the scripts engine chips). They need their own drivers
29 * because they are missing so many of the scripts and snazzy register
30 * features of their elder brothers (the 710, 720 and 770).
32 * The 700 is the lowliest of the line; it can only do async SCSI.
33 * The 700-66 can at least do synchronous SCSI up to 10MHz.
35 * The 700 chip has no host bus interface logic of its own. However,
36 * it is usually mapped to a location with well defined register
37 * offsets. Therefore, if you can determine the base address and the
38 * irq your board incorporating this chip uses, you can probably use
39 * this driver to run it (although you'll probably have to write a
40 * minimal wrapper for the purpose---see the NCR_D700 driver for
41 * details about how to do this).
46 * 1. Better statistics in the proc fs
48 * 2. Implement message queue (queues SCSI messages like commands) and make
49 * the abort and device reset functions use them.
56 * Fixed bad bug affecting tag starvation processing (previously the
57 * driver would hang the system if too many tags starved). Also fixed
58 * bad bug having to do with 10 byte command processing and REQUEST
59 * SENSE (the command would loop forever getting a transfer length
60 * mismatch in the CMD phase).
64 * Fixed scripts problem which caused certain devices (notably CDRWs)
65 * to hang on initial INQUIRY. Updated NCR_700_readl/writel to use
66 * __raw_readl/writel for parisc compatibility (Thomas
67 * Bogendoerfer). Added missing SCp->request_bufflen initialisation
68 * for sense requests (Ryan Bradetich).
72 * Following test of the 64 bit parisc kernel by Richard Hirst,
73 * several problems have now been corrected. Also adds support for
74 * consistent memory allocation.
78 * More compatibility changes for 710 (now actually works). Enhanced
79 * support for odd clock speeds which constrain SDTR negotiations.
80 * Corrected cacheline separation for scsi messages and status for
81 * incoherent architectures. Use of the pci mapping functions on
82 * buffers to begin support for 64 bit drivers.
86 * Added support for the 53c710 chip (in 53c700 emulation mode only---no
87 * special 53c710 instructions or registers are used).
91 * More endianness/cache coherency changes.
93 * Better bad device handling (handles devices lying about tag
94 * queueing support and devices which fail to provide sense data on
95 * contingent allegiance conditions)
97 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
98 * debugging this driver on the parisc architecture and suggesting
99 * many improvements and bug fixes.
101 * Thanks also go to Linuxcare Inc. for providing several PARISC
102 * machines for me to debug the driver on.
106 * Made the driver mem or io mapped; added endian invariance; added
107 * dma cache flushing operations for architectures which need it;
108 * added support for more varied clocking speeds.
112 * Initial modularisation from the D700. See NCR_D700.c for the rest of
115 #define NCR_700_VERSION "2.8"
117 #include <linux/config.h>
118 #include <linux/kernel.h>
119 #include <linux/types.h>
120 #include <linux/string.h>
121 #include <linux/ioport.h>
122 #include <linux/delay.h>
123 #include <linux/spinlock.h>
124 #include <linux/completion.h>
125 #include <linux/sched.h>
126 #include <linux/init.h>
127 #include <linux/proc_fs.h>
128 #include <linux/blkdev.h>
129 #include <linux/module.h>
130 #include <linux/interrupt.h>
132 #include <asm/system.h>
134 #include <asm/pgtable.h>
135 #include <asm/byteorder.h>
137 #include <scsi/scsi.h>
138 #include <scsi/scsi_cmnd.h>
139 #include <scsi/scsi_dbg.h>
140 #include <scsi/scsi_eh.h>
141 #include <scsi/scsi_host.h>
142 #include <scsi/scsi_tcq.h>
143 #include <scsi/scsi_transport.h>
144 #include <scsi/scsi_transport_spi.h>
148 /* NOTE: For 64 bit drivers there are points in the code where we use
149 * a non dereferenceable pointer to point to a structure in dma-able
150 * memory (which is 32 bits) so that we can use all of the structure
151 * operations but take the address at the end. This macro allows us
152 * to truncate the 64 bit pointer down to 32 bits without the compiler
154 #define to32bit(x) ((__u32)((unsigned long)(x)))
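/* For example, this is how the scatter list start is patched into the
 * script later in this file:
 *
 *	script_patch_32_abs(hostdata->script, SGScriptStartAddress,
 *			    to32bit(&slot->pSG[0].ins));
 */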
159 #define STATIC static
162 MODULE_AUTHOR("James Bottomley");
163 MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
164 MODULE_LICENSE("GPL");
166 /* This is the script */
167 #include "53c700_d.h"
170 STATIC int NCR_700_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
171 STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
172 STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
173 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
174 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
175 STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
176 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
177 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
178 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
179 static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
181 STATIC struct device_attribute *NCR_700_dev_attrs[];
183 STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
185 static char *NCR_700_phase[] = {
188 "before command phase",
189 "after command phase",
190 "after status phase",
191 "after data in phase",
192 "after data out phase",
196 static char *NCR_700_condition[] = {
204 "REJECT_MSG RECEIVED",
205 "DISCONNECT_MSG RECEIVED",
211 static char *NCR_700_fatal_messages[] = {
212 "unexpected message after reselection",
213 "still MSG_OUT after message injection",
214 "not MSG_IN after selection",
215 "Illegal message length received",
218 static char *NCR_700_SBCL_bits[] = {
229 static char *NCR_700_SBCL_to_phase[] = {
240 static __u8 NCR_700_SDTR_msg[] = {
241 0x01, /* Extended message */
242 0x03, /* Extended message Length */
243 0x01, /* SDTR Extended message */
248 /* This translates the SDTR message offset and period to a value
249 * which can be loaded into the SXFER_REG.
251 * NOTE: According to SCSI-2, the true transfer period (in ns) is
252 * actually four times this period value */
254 NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
255 __u8 offset, __u8 period)
259 __u8 min_xferp = (hostdata->chip710
260 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
261 __u8 max_offset = (hostdata->chip710
262 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
267 if(period < hostdata->min_period) {
268 printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, hostdata->min_period*4);
269 period = hostdata->min_period;
271 XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
272 if(offset > max_offset) {
273 printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
277 if(XFERP < min_xferp) {
278 printk(KERN_WARNING "53c700: XFERP %d is less than minimum, setting to %d\n",
282 return (offset & 0x0f) | (XFERP & 0x07)<<4;
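/* Worked example with illustrative numbers only: assuming a 50MHz sync
 * clock, an SDTR period value of 25 (i.e. 100ns) and an offset of 8,
 * and assuming neither the min_xferp nor the max_offset clamp applies,
 * the calculation above gives
 *
 *	XFERP = (25*4 * 50)/1000 - 4 = 1
 *	SXFER = (8 & 0x0f) | (1 & 0x07)<<4 = 0x18
 */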
286 NCR_700_get_SXFER(struct scsi_device *SDp)
288 struct NCR_700_Host_Parameters *hostdata =
289 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
291 return NCR_700_offset_period_to_sxfer(hostdata,
292 spi_offset(SDp->sdev_target),
293 spi_period(SDp->sdev_target));
297 NCR_700_detect(struct scsi_host_template *tpnt,
298 struct NCR_700_Host_Parameters *hostdata, struct device *dev)
300 dma_addr_t pScript, pSlots;
303 struct Scsi_Host *host;
304 static int banner = 0;
307 if(tpnt->sdev_attrs == NULL)
308 tpnt->sdev_attrs = NCR_700_dev_attrs;
310 memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
311 &pScript, GFP_KERNEL);
313 printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
317 script = (__u32 *)memory;
318 hostdata->msgin = memory + MSGIN_OFFSET;
319 hostdata->msgout = memory + MSGOUT_OFFSET;
320 hostdata->status = memory + STATUS_OFFSET;
321 /* all of these offsets are L1_CACHE_BYTES separated. It is fatal
322 * if this isn't sufficient separation to avoid dma flushing issues */
323 BUG_ON(!dma_is_consistent(pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
324 hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
327 pSlots = pScript + SLOTS_OFFSET;
329 /* Fill in the missing routines from the host template */
330 tpnt->queuecommand = NCR_700_queuecommand;
331 tpnt->eh_abort_handler = NCR_700_abort;
332 tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
333 tpnt->eh_host_reset_handler = NCR_700_host_reset;
334 tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
335 tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
336 tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
337 tpnt->use_clustering = ENABLE_CLUSTERING;
338 tpnt->slave_configure = NCR_700_slave_configure;
339 tpnt->slave_destroy = NCR_700_slave_destroy;
340 tpnt->change_queue_depth = NCR_700_change_queue_depth;
341 tpnt->change_queue_type = NCR_700_change_queue_type;
343 if(tpnt->name == NULL)
344 tpnt->name = "53c700";
345 if(tpnt->proc_name == NULL)
346 tpnt->proc_name = "53c700";
349 host = scsi_host_alloc(tpnt, 4);
352 memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
353 * NCR_700_COMMAND_SLOTS_PER_HOST);
354 for(j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
355 dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
356 - (unsigned long)&hostdata->slots[0].SG[0]);
357 hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
359 hostdata->free_list = &hostdata->slots[j];
361 hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
362 hostdata->slots[j].state = NCR_700_SLOT_FREE;
365 for(j = 0; j < sizeof(SCRIPT)/sizeof(SCRIPT[0]); j++) {
366 script[j] = bS_to_host(SCRIPT[j]);
369 /* adjust all labels to be bus physical */
370 for(j = 0; j < PATCHES; j++) {
371 script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
373 /* now patch up fixed addresses. */
374 script_patch_32(script, MessageLocation,
375 pScript + MSGOUT_OFFSET);
376 script_patch_32(script, StatusAddress,
377 pScript + STATUS_OFFSET);
378 script_patch_32(script, ReceiveMsgAddress,
379 pScript + MSGIN_OFFSET);
381 hostdata->script = script;
382 hostdata->pScript = pScript;
383 dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
384 hostdata->state = NCR_700_HOST_FREE;
385 hostdata->cmd = NULL;
387 host->max_lun = NCR_700_MAX_LUNS;
388 BUG_ON(NCR_700_transport_template == NULL);
389 host->transportt = NCR_700_transport_template;
390 host->unique_id = (unsigned long)hostdata->base;
391 hostdata->eh_complete = NULL;
392 host->hostdata[0] = (unsigned long)hostdata;
394 NCR_700_writeb(0xff, host, CTEST9_REG);
395 if(hostdata->chip710)
396 hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
398 hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
399 hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
401 printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
404 printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
405 hostdata->chip710 ? "53c710" :
406 (hostdata->fast ? "53c700-66" : "53c700"),
407 hostdata->rev, hostdata->differential ?
408 "(Differential)" : "");
410 NCR_700_chip_reset(host);
412 if (scsi_add_host(host, dev)) {
413 dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
418 spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
425 NCR_700_release(struct Scsi_Host *host)
427 struct NCR_700_Host_Parameters *hostdata =
428 (struct NCR_700_Host_Parameters *)host->hostdata[0];
430 dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
431 hostdata->script, hostdata->pScript);
436 NCR_700_identify(int can_disconnect, __u8 lun)
438 return IDENTIFY_BASE |
439 ((can_disconnect) ? 0x40 : 0) |
440 (lun & NCR_700_LUN_MASK);
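/* e.g. an IDENTIFY for LUN 2 with disconnection permitted is
 * IDENTIFY_BASE | 0x40 | 2 == 0xc2 on the wire (IDENTIFY_BASE
 * being 0x80) */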
444 * Function : static int NCR_700_data_residual (struct Scsi_Host *host)
446 * Purpose : return residual data count of what's in the chip. If you
447 * really want to know what this function is doing, it's almost a
448 * direct transcription of the algorithm described in the 53c710
449 * guide, except that the DBC and DFIFO registers are only 6 bits
452 * Inputs : host - SCSI host */
454 NCR_700_data_residual (struct Scsi_Host *host) {
455 struct NCR_700_Host_Parameters *hostdata =
456 (struct NCR_700_Host_Parameters *)host->hostdata[0];
457 int count, synchronous = 0;
460 if(hostdata->chip710) {
461 count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
462 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
464 count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
465 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
469 synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
471 /* get the data direction */
472 ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
477 count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
479 if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
483 __u8 sstat = NCR_700_readb(host, SSTAT1_REG);
484 if (sstat & SODL_REG_FULL)
486 if (synchronous && (sstat & SODR_REG_FULL))
491 printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
496 /* print out the SCSI wires and corresponding phase from the SBCL register
499 sbcl_to_string(__u8 sbcl)
502 static char ret[256];
507 strcat(ret, NCR_700_SBCL_bits[i]);
509 strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
514 bitmap_to_number(__u8 bitmap)
518 for(i=0; i<8 && !(bitmap &(1<<i)); i++)
523 /* Pull a slot off the free list */
524 STATIC struct NCR_700_command_slot *
525 find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
527 struct NCR_700_command_slot *slot = hostdata->free_list;
531 if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
532 printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
536 if(slot->state != NCR_700_SLOT_FREE)
538 printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
541 hostdata->free_list = slot->ITL_forw;
542 slot->ITL_forw = NULL;
545 /* NOTE: set the state to busy here, not queued, since this
546 * indicates the slot is in use and cannot be run by the IRQ
547 * finish routine. If we cannot queue the command when it
548 * is properly built, we then change to NCR_700_SLOT_QUEUED */
549 slot->state = NCR_700_SLOT_BUSY;
550 hostdata->command_slot_count++;
556 free_slot(struct NCR_700_command_slot *slot,
557 struct NCR_700_Host_Parameters *hostdata)
559 if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
560 printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
562 if(slot->state == NCR_700_SLOT_FREE) {
563 printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
566 slot->resume_offset = 0;
568 slot->state = NCR_700_SLOT_FREE;
569 slot->ITL_forw = hostdata->free_list;
570 hostdata->free_list = slot;
571 hostdata->command_slot_count--;
575 /* This routine really does very little. The command is indexed on
576 the ITL and (if tagged) the ITLQ lists in _queuecommand */
578 save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
579 struct scsi_cmnd *SCp, __u32 dsp)
581 /* It's just possible that this gets executed twice */
583 struct NCR_700_command_slot *slot =
584 (struct NCR_700_command_slot *)SCp->host_scribble;
586 slot->resume_offset = dsp;
588 hostdata->state = NCR_700_HOST_FREE;
589 hostdata->cmd = NULL;
593 NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
594 struct NCR_700_command_slot *slot)
596 if(SCp->sc_data_direction != DMA_NONE &&
597 SCp->sc_data_direction != DMA_BIDIRECTIONAL) {
599 dma_unmap_sg(hostdata->dev, SCp->buffer,
600 SCp->use_sg, SCp->sc_data_direction);
602 dma_unmap_single(hostdata->dev, slot->dma_handle,
603 SCp->request_bufflen,
604 SCp->sc_data_direction);
610 NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
611 struct scsi_cmnd *SCp, int result)
613 hostdata->state = NCR_700_HOST_FREE;
614 hostdata->cmd = NULL;
617 struct NCR_700_command_slot *slot =
618 (struct NCR_700_command_slot *)SCp->host_scribble;
620 NCR_700_unmap(hostdata, SCp, slot);
621 dma_unmap_single(hostdata->dev, slot->pCmd,
622 sizeof(SCp->cmnd), DMA_TO_DEVICE);
623 if(SCp->cmnd[0] == REQUEST_SENSE && SCp->cmnd[6] == NCR_700_INTERNAL_SENSE_MAGIC) {
625 printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
626 SCp, SCp->cmnd[7], result);
627 scsi_print_sense("53c700", SCp);
630 /* restore the old result if the request sense was
633 result = SCp->cmnd[7];
634 /* now restore the original command */
635 memcpy((void *) SCp->cmnd, (void *) SCp->data_cmnd,
636 sizeof(SCp->data_cmnd));
637 SCp->request_buffer = SCp->buffer;
638 SCp->request_bufflen = SCp->bufflen;
639 SCp->use_sg = SCp->old_use_sg;
640 SCp->cmd_len = SCp->old_cmd_len;
641 SCp->sc_data_direction = SCp->sc_old_data_direction;
642 SCp->underflow = SCp->old_underflow;
645 free_slot(slot, hostdata);
647 if(NCR_700_get_depth(SCp->device) == 0 ||
648 NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
649 printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
650 NCR_700_get_depth(SCp->device));
651 #endif /* NCR_700_DEBUG */
652 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
654 SCp->host_scribble = NULL;
655 SCp->result = result;
658 printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
664 NCR_700_internal_bus_reset(struct Scsi_Host *host)
667 NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
669 NCR_700_writeb(0, host, SCNTL1_REG);
674 NCR_700_chip_setup(struct Scsi_Host *host)
676 struct NCR_700_Host_Parameters *hostdata =
677 (struct NCR_700_Host_Parameters *)host->hostdata[0];
678 __u32 dcntl_extra = 0;
680 __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
682 if(hostdata->chip710) {
683 __u8 burst_disable = hostdata->burst_disable
685 dcntl_extra = COMPAT_700_MODE;
687 NCR_700_writeb(dcntl_extra, host, DCNTL_REG);
688 NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
689 host, DMODE_710_REG);
690 NCR_700_writeb(burst_disable | (hostdata->differential ?
691 DIFF : 0), host, CTEST7_REG);
692 NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
693 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
694 | AUTO_ATN, host, SCNTL0_REG);
696 NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
697 host, DMODE_700_REG);
698 NCR_700_writeb(hostdata->differential ?
699 DIFF : 0, host, CTEST7_REG);
701 /* this is for 700-66, does nothing on 700 */
702 NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
703 | GENERATE_RECEIVE_PARITY, host,
706 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
707 | PARITY | AUTO_ATN, host, SCNTL0_REG);
711 NCR_700_writeb(1 << host->this_id, host, SCID_REG);
712 NCR_700_writeb(0, host, SBCL_REG);
713 NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
715 NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
716 | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
718 NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
719 NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
720 if(hostdata->clock > 75) {
721 printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
722 /* do the best we can, but the async clock will be out
723 * of spec: sync divider 2, async divider 3 */
724 DEBUG(("53c700: sync 2 async 3\n"));
725 NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
726 NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
727 hostdata->sync_clock = hostdata->clock/2;
728 } else if(hostdata->clock > 50 && hostdata->clock <= 75) {
729 /* sync divider 1.5, async divider 3 */
730 DEBUG(("53c700: sync 1.5 async 3\n"));
731 NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
732 NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
733 hostdata->sync_clock = hostdata->clock*2;
734 hostdata->sync_clock /= 3;
736 } else if(hostdata->clock > 37 && hostdata->clock <= 50) {
737 /* sync divider 1, async divider 2 */
738 DEBUG(("53c700: sync 1 async 2\n"));
739 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
740 NCR_700_writeb(ASYNC_DIV_2_0 | dcntl_extra, host, DCNTL_REG);
741 hostdata->sync_clock = hostdata->clock;
742 } else if(hostdata->clock > 25 && hostdata->clock <=37) {
743 /* sync divider 1, async divider 1.5 */
744 DEBUG(("53c700: sync 1 async 1.5\n"));
745 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
746 NCR_700_writeb(ASYNC_DIV_1_5 | dcntl_extra, host, DCNTL_REG);
747 hostdata->sync_clock = hostdata->clock;
749 DEBUG(("53c700: sync 1 async 1\n"));
750 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
751 NCR_700_writeb(ASYNC_DIV_1_0 | dcntl_extra, host, DCNTL_REG);
752 /* sync divider 1, async divider 1 */
753 hostdata->sync_clock = hostdata->clock;
755 /* Calculate the actual minimum period that can be supported
756 * by our synchronous clock speed. See the 710 manual for
757 * exact details of this calculation which is based on a
758 * setting of the SXFER register */
759 min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
760 hostdata->min_period = NCR_700_MIN_PERIOD;
761 if(min_period > NCR_700_MIN_PERIOD)
762 hostdata->min_period = min_period;
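/* Illustrative numbers only (assuming min_xferp == 1 and a 50MHz sync
 * clock): min_period = 1000*(4+1)/(4*50) = 25 in the 4ns units of the
 * SDTR period field, i.e. a 100ns (10MHz) fastest transfer period,
 * which is only adopted above if it exceeds NCR_700_MIN_PERIOD */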
766 NCR_700_chip_reset(struct Scsi_Host *host)
768 struct NCR_700_Host_Parameters *hostdata =
769 (struct NCR_700_Host_Parameters *)host->hostdata[0];
770 if(hostdata->chip710) {
771 NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
774 NCR_700_writeb(0, host, ISTAT_REG);
776 NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
779 NCR_700_writeb(0, host, DCNTL_REG);
784 NCR_700_chip_setup(host);
787 /* The heart of the message processing engine is that the instruction
788 * immediately after the INT is the normal case (and so must be CLEAR
789 * ACK). If we want to do something else, we call that routine in
790 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
791 * ACK) so that the routine returns correctly to resume its activity
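/* Concretely, in the handlers below: resume_offset starts out equal to
 * dsp, so the fall-through case resumes at the CLEAR ACK, while temp is
 * dsp + 8 (one script instruction further on) and is written to
 * TEMP_REG, so that when a script routine is called instead its RETURN
 * comes back just past the CLEAR ACK */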
794 process_extended_message(struct Scsi_Host *host,
795 struct NCR_700_Host_Parameters *hostdata,
796 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
798 __u32 resume_offset = dsp, temp = dsp + 8;
799 __u8 pun = 0xff, lun = 0xff;
802 pun = SCp->device->id;
803 lun = SCp->device->lun;
806 switch(hostdata->msgin[2]) {
808 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
809 struct scsi_target *starget = SCp->device->sdev_target;
810 __u8 period = hostdata->msgin[3];
811 __u8 offset = hostdata->msgin[4];
813 if(offset == 0 || period == 0) {
818 spi_offset(starget) = offset;
819 spi_period(starget) = period;
821 if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
822 spi_display_xfer_agreement(starget);
823 NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
826 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
827 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
829 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
833 /* SDTR message out of the blue, reject it */
834 printk(KERN_WARNING "scsi%d Unexpected SDTR msg\n",
836 hostdata->msgout[0] = A_REJECT_MSG;
837 dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
838 script_patch_16(hostdata->script, MessageCount, 1);
839 /* SendMsgOut returns, so set up the return
841 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
846 printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
847 host->host_no, pun, lun);
848 hostdata->msgout[0] = A_REJECT_MSG;
849 dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
850 script_patch_16(hostdata->script, MessageCount, 1);
851 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
856 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
857 host->host_no, pun, lun,
858 NCR_700_phase[(dsps & 0xf00) >> 8]);
859 scsi_print_msg(hostdata->msgin);
862 hostdata->msgout[0] = A_REJECT_MSG;
863 dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
864 script_patch_16(hostdata->script, MessageCount, 1);
865 /* SendMsgOut returns, so set up the return
867 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
869 NCR_700_writel(temp, host, TEMP_REG);
870 return resume_offset;
874 process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata,
875 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
877 /* work out where to return to */
878 __u32 temp = dsp + 8, resume_offset = dsp;
879 __u8 pun = 0xff, lun = 0xff;
882 pun = SCp->device->id;
883 lun = SCp->device->lun;
887 printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
888 NCR_700_phase[(dsps & 0xf00) >> 8]);
889 scsi_print_msg(hostdata->msgin);
893 switch(hostdata->msgin[0]) {
896 resume_offset = process_extended_message(host, hostdata, SCp,
901 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
902 /* Rejected our sync negotiation attempt */
903 spi_period(SCp->device->sdev_target) =
904 spi_offset(SCp->device->sdev_target) = 0;
905 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
906 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
907 } else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
908 /* rejected our first simple tag message */
909 printk(KERN_WARNING "scsi%d (%d:%d) Rejected first tag queue attempt, turning off tag queueing\n", host->host_no, pun, lun);
910 /* we're done negotiating */
911 NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
912 hostdata->tag_negotiated &= ~(1<<SCp->device->id);
913 SCp->device->tagged_supported = 0;
914 scsi_deactivate_tcq(SCp->device, host->cmd_per_lun);
916 printk(KERN_WARNING "scsi%d (%d:%d) Unexpected REJECT Message %s\n",
917 host->host_no, pun, lun,
918 NCR_700_phase[(dsps & 0xf00) >> 8]);
919 /* however, just ignore it */
923 case A_PARITY_ERROR_MSG:
924 printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
926 NCR_700_internal_bus_reset(host);
928 case A_SIMPLE_TAG_MSG:
929 printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
930 pun, lun, hostdata->msgin[1],
931 NCR_700_phase[(dsps & 0xf00) >> 8]);
935 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
936 host->host_no, pun, lun,
937 NCR_700_phase[(dsps & 0xf00) >> 8]);
939 scsi_print_msg(hostdata->msgin);
942 hostdata->msgout[0] = A_REJECT_MSG;
943 dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
944 script_patch_16(hostdata->script, MessageCount, 1);
945 /* SendMsgOut returns, so set up the return
947 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
951 NCR_700_writel(temp, host, TEMP_REG);
952 /* set us up to receive another message */
953 dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
954 return resume_offset;
958 process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
959 struct Scsi_Host *host,
960 struct NCR_700_Host_Parameters *hostdata)
962 __u32 resume_offset = 0;
963 __u8 pun = 0xff, lun=0xff;
966 pun = SCp->device->id;
967 lun = SCp->device->lun;
970 if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
971 DEBUG((" COMMAND COMPLETE, status=%02x\n",
972 hostdata->status[0]));
973 /* OK, if TCQ still under negotiation, we now know it works */
974 if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
975 NCR_700_set_tag_neg_state(SCp->device,
976 NCR_700_FINISHED_TAG_NEGOTIATION);
978 /* check for contingent allegiance conditions */
979 if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
980 status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
981 struct NCR_700_command_slot *slot =
982 (struct NCR_700_command_slot *)SCp->host_scribble;
983 if(SCp->cmnd[0] == REQUEST_SENSE) {
984 /* OOPS: bad device, returning another
985 * contingent allegiance condition */
986 printk(KERN_ERR "scsi%d (%d:%d) broken device is looping in contingent allegiance: ignoring\n", host->host_no, pun, lun);
987 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
990 scsi_print_command(SCp);
991 printk(" cmd %p has status %d, requesting sense\n",
992 SCp, hostdata->status[0]);
994 /* we can destroy the command here
995 * because the contingent allegiance
996 * condition will cause a retry which
997 * will re-copy the command from the
998 * saved data_cmnd. We also unmap any
999 * data associated with the command
1001 NCR_700_unmap(hostdata, SCp, slot);
1003 SCp->cmnd[0] = REQUEST_SENSE;
1004 SCp->cmnd[1] = (SCp->device->lun & 0x7) << 5;
1007 SCp->cmnd[4] = sizeof(SCp->sense_buffer);
1010 /* Here's a quiet hack: the
1011 * REQUEST_SENSE command is six bytes,
1012 * so store a flag indicating that
1013 * this was an internal sense request
1014 * and the original status at the end
1016 SCp->cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1017 SCp->cmnd[7] = hostdata->status[0];
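/* The unwind happens in NCR_700_scsi_done() above: when it sees
 * cmnd[6] == NCR_700_INTERNAL_SENSE_MAGIC it restores cmnd[7] as the
 * original status and re-copies the command from data_cmnd */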
1019 SCp->sc_data_direction = DMA_FROM_DEVICE;
1020 dma_sync_single_for_device(hostdata->dev, slot->pCmd,
1021 SCp->cmd_len, DMA_TO_DEVICE);
1022 SCp->request_bufflen = sizeof(SCp->sense_buffer);
1023 slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1024 slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
1025 slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1026 slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1027 slot->SG[1].pAddr = 0;
1028 slot->resume_offset = hostdata->pScript;
1029 dma_cache_sync(slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1030 dma_cache_sync(SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1032 /* queue the command for reissue */
1033 slot->state = NCR_700_SLOT_QUEUED;
1034 hostdata->state = NCR_700_HOST_FREE;
1035 hostdata->cmd = NULL;
1038 // Currently rely on the mid layer evaluation
1039 // of the tag queuing capability
1041 //if(status_byte(hostdata->status[0]) == GOOD &&
1042 // SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1043 // /* Piggy back the tag queueing support
1044 // * on this command */
1045 // dma_sync_single_for_cpu(hostdata->dev,
1046 // slot->dma_handle,
1047 // SCp->request_bufflen,
1048 // DMA_FROM_DEVICE);
1049 // if(((char *)SCp->request_buffer)[7] & 0x02) {
1050 // printk(KERN_INFO "scsi%d: (%d:%d) Enabling Tag Command Queuing\n", host->host_no, pun, lun);
1051 // hostdata->tag_negotiated |= (1<<SCp->device->id);
1052 // NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1054 // NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1055 // hostdata->tag_negotiated &= ~(1<<SCp->device->id);
1058 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1060 } else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1061 __u8 i = (dsps & 0xf00) >> 8;
1063 printk(KERN_ERR "scsi%d: (%d:%d), UNEXPECTED PHASE %s (%s)\n",
1064 host->host_no, pun, lun,
1066 sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1067 printk(KERN_ERR " len = %d, cmd =", SCp->cmd_len);
1068 scsi_print_command(SCp);
1070 NCR_700_internal_bus_reset(host);
1071 } else if((dsps & 0xfffff000) == A_FATAL) {
1072 int i = (dsps & 0xfff);
1074 printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1075 host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1076 if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1077 printk(KERN_ERR " msg begins %02x %02x\n",
1078 hostdata->msgin[0], hostdata->msgin[1]);
1080 NCR_700_internal_bus_reset(host);
1081 } else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1082 #ifdef NCR_700_DEBUG
1083 __u8 i = (dsps & 0xf00) >> 8;
1085 printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1086 host->host_no, pun, lun,
1087 i, NCR_700_phase[i]);
1089 save_for_reselection(hostdata, SCp, dsp);
1091 } else if(dsps == A_RESELECTION_IDENTIFIED) {
1093 struct NCR_700_command_slot *slot;
1094 __u8 reselection_id = hostdata->reselection_id;
1095 struct scsi_device *SDp;
1097 lun = hostdata->msgin[0] & 0x1f;
1099 hostdata->reselection_id = 0xff;
1100 DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1101 host->host_no, reselection_id, lun));
1102 /* clear the reselection indicator */
1103 SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1104 if(unlikely(SDp == NULL)) {
1105 printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1106 host->host_no, reselection_id, lun);
1109 if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1110 struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]);
1111 if(unlikely(SCp == NULL)) {
1112 printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1113 host->host_no, reselection_id, lun, hostdata->msgin[2]);
1117 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1118 DEBUG(("53c700: %d:%d:%d, reselection is tag %d, slot %p(%d)\n",
1119 host->host_no, SDp->id, SDp->lun,
1120 hostdata->msgin[2], slot, slot->tag));
1122 struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
1123 if(unlikely(SCp == NULL)) {
1124 printk(KERN_ERR "scsi%d: (%d:%d) no saved request for untagged cmd\n",
1125 host->host_no, reselection_id, lun);
1128 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1132 printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1133 host->host_no, reselection_id, lun,
1134 hostdata->msgin[0], hostdata->msgin[1],
1135 hostdata->msgin[2]);
1137 if(hostdata->state != NCR_700_HOST_BUSY)
1138 printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1140 resume_offset = slot->resume_offset;
1141 hostdata->cmd = slot->cmnd;
1143 /* re-patch for this command */
1144 script_patch_32_abs(hostdata->script, CommandAddress,
1146 script_patch_16(hostdata->script,
1147 CommandCount, slot->cmnd->cmd_len);
1148 script_patch_32_abs(hostdata->script, SGScriptStartAddress,
1149 to32bit(&slot->pSG[0].ins));
1151 /* Note: setting SXFER only works if we're
1152 * still in the MESSAGE phase, so it is vital
1153 * that ACK is still asserted when we process
1154 * the reselection message. The resume offset
1155 * should therefore always clear ACK */
1156 NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1158 dma_cache_sync(hostdata->msgin,
1159 MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1160 dma_cache_sync(hostdata->msgout,
1161 MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1162 /* I'm just being paranoid here, the command should
1163 * already have been flushed from the cache */
1164 dma_cache_sync(slot->cmnd->cmnd,
1165 slot->cmnd->cmd_len, DMA_TO_DEVICE);
1170 } else if(dsps == A_RESELECTED_DURING_SELECTION) {
1172 /* This section is full of debugging code because I've
1173 * never managed to reach it. I think what happens is
1174 * that, because the 700 runs with selection
1175 * interrupts enabled the whole time, we take a
1176 * selection interrupt before we manage to get to the
1177 * reselected script interrupt */
1179 __u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1180 struct NCR_700_command_slot *slot;
1182 /* Take out our own ID */
1183 reselection_id &= ~(1<<host->this_id);
1185 /* I've never seen this happen, so keep this as a printk rather
1187 printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1188 host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1191 /* FIXME: DEBUGGING CODE */
1192 __u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1195 for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1196 if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1197 && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1200 printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1201 SCp = hostdata->slots[i].cmnd;
1205 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1206 /* change slot from busy to queued to redo command */
1207 slot->state = NCR_700_SLOT_QUEUED;
1209 hostdata->cmd = NULL;
1211 if(reselection_id == 0) {
1212 if(hostdata->reselection_id == 0xff) {
1213 printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1216 printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1218 reselection_id = hostdata->reselection_id;
1222 /* convert to real ID */
1223 reselection_id = bitmap_to_number(reselection_id);
1225 hostdata->reselection_id = reselection_id;
1226 /* just in case we have a stale simple tag message, clear it */
1227 hostdata->msgin[1] = 0;
1228 dma_cache_sync(hostdata->msgin,
1229 MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1230 if(hostdata->tag_negotiated & (1<<reselection_id)) {
1231 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1233 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1235 } else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1236 /* we've just disconnected from the bus, do nothing since
1237 * a return here will re-run the queued command slot
1238 * that may have been interrupted by the initial selection */
1239 DEBUG((" SELECTION COMPLETED\n"));
1240 } else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1241 resume_offset = process_message(host, hostdata, SCp,
1243 } else if((dsps & 0xfffff000) == 0) {
1244 __u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1245 printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1246 host->host_no, pun, lun, NCR_700_condition[i],
1247 NCR_700_phase[j], dsp - hostdata->pScript);
1249 scsi_print_command(SCp);
1252 for(i = 0; i < SCp->use_sg + 1; i++) {
1253 printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, ((struct scatterlist *)SCp->buffer)[i].length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1257 NCR_700_internal_bus_reset(host);
1258 } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1259 printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1260 host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1261 resume_offset = dsp;
1263 printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1264 host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1265 NCR_700_internal_bus_reset(host);
1267 return resume_offset;
1270 /* We run the 53c700 with selection interrupts always enabled. This
1271 * means that the chip may be selected as soon as the bus frees. On a
1272 * busy bus, this can be before the scripts engine finishes its
1273 * processing. Therefore, part of the selection processing has to be
1274 * to find out what the scripts engine is doing and complete the
1275 * function if necessary (i.e. process the pending disconnect or save
1276 * the interrupted initial selection). */
1278 process_selection(struct Scsi_Host *host, __u32 dsp)
1280 __u8 id = 0; /* Squash compiler warning */
1282 __u32 resume_offset = 0;
1283 struct NCR_700_Host_Parameters *hostdata =
1284 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1285 struct scsi_cmnd *SCp = hostdata->cmd;
1288 for(count = 0; count < 5; count++) {
1289 id = NCR_700_readb(host, hostdata->chip710 ?
1290 CTEST9_REG : SFBR_REG);
1292 /* Take out our own ID */
1293 id &= ~(1<<host->this_id);
1298 sbcl = NCR_700_readb(host, SBCL_REG);
1299 if((sbcl & SBCL_IO) == 0) {
1300 /* mark as having been selected rather than reselected */
1303 /* convert to real ID */
1304 hostdata->reselection_id = id = bitmap_to_number(id);
1305 DEBUG(("scsi%d: Reselected by %d\n",
1306 host->host_no, id));
1308 if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1309 struct NCR_700_command_slot *slot =
1310 (struct NCR_700_command_slot *)SCp->host_scribble;
1311 DEBUG((" ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1313 switch(dsp - hostdata->pScript) {
1314 case Ent_Disconnect1:
1315 case Ent_Disconnect2:
1316 save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1318 case Ent_Disconnect3:
1319 case Ent_Disconnect4:
1320 save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1322 case Ent_Disconnect5:
1323 case Ent_Disconnect6:
1324 save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1326 case Ent_Disconnect7:
1327 case Ent_Disconnect8:
1328 save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1332 process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1336 slot->state = NCR_700_SLOT_QUEUED;
1340 hostdata->state = NCR_700_HOST_BUSY;
1341 hostdata->cmd = NULL;
1342 /* clear any stale simple tag message */
1343 hostdata->msgin[1] = 0;
1344 dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
1348 /* Selected as target, Ignore */
1349 resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1350 } else if(hostdata->tag_negotiated & (1<<id)) {
1351 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1353 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1355 return resume_offset;
1359 NCR_700_clear_fifo(struct Scsi_Host *host) {
1360 const struct NCR_700_Host_Parameters *hostdata
1361 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1362 if(hostdata->chip710) {
1363 NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1365 NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1370 NCR_700_flush_fifo(struct Scsi_Host *host) {
1371 const struct NCR_700_Host_Parameters *hostdata
1372 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1373 if(hostdata->chip710) {
1374 NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1376 NCR_700_writeb(0, host, CTEST8_REG);
1378 NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1380 NCR_700_writeb(0, host, DFIFO_REG);
1385 /* The queue lock with interrupts disabled must be held on entry to
1388 NCR_700_start_command(struct scsi_cmnd *SCp)
1390 struct NCR_700_command_slot *slot =
1391 (struct NCR_700_command_slot *)SCp->host_scribble;
1392 struct NCR_700_Host_Parameters *hostdata =
1393 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1394 __u16 count = 1; /* for IDENTIFY message */
1396 if(hostdata->state != NCR_700_HOST_FREE) {
1397 /* keep this inside the lock to close the race window where
1398 * the running command finishes on another CPU before we have
1399 * changed the state to queued on this one */
1400 slot->state = NCR_700_SLOT_QUEUED;
1402 DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1403 SCp->device->host->host_no, slot->cmnd, slot));
1406 hostdata->state = NCR_700_HOST_BUSY;
1407 hostdata->cmd = SCp;
1408 slot->state = NCR_700_SLOT_BUSY;
1409 /* keep interrupts disabled until we have the command correctly
1410 * set up so we cannot take a selection interrupt */
1412 hostdata->msgout[0] = NCR_700_identify(SCp->cmnd[0] != REQUEST_SENSE,
1414 /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1415 * if the negotiated transfer parameters still hold, so
1416 * always renegotiate them */
1417 if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE) {
1418 NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1421 /* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1422 * If a contingent allegiance condition exists, the device
1423 * will refuse all tags, so send the request sense as untagged
1425 if((hostdata->tag_negotiated & (1<<SCp->device->id))
1426 && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE)) {
1427 count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]);
1430 if(hostdata->fast &&
1431 NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1432 memcpy(&hostdata->msgout[count], NCR_700_SDTR_msg,
1433 sizeof(NCR_700_SDTR_msg));
1434 hostdata->msgout[count+3] = spi_period(SCp->device->sdev_target);
1435 hostdata->msgout[count+4] = spi_offset(SCp->device->sdev_target);
1436 count += sizeof(NCR_700_SDTR_msg);
1437 NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1440 script_patch_16(hostdata->script, MessageCount, count);
1443 script_patch_ID(hostdata->script,
1444 Device_ID, 1<<SCp->device->id);
1446 script_patch_32_abs(hostdata->script, CommandAddress,
1448 script_patch_16(hostdata->script, CommandCount, SCp->cmd_len);
1449 /* finally plumb the beginning of the SG list into the script
1451 script_patch_32_abs(hostdata->script, SGScriptStartAddress,
1452 to32bit(&slot->pSG[0].ins));
1453 NCR_700_clear_fifo(SCp->device->host);
1455 if(slot->resume_offset == 0)
1456 slot->resume_offset = hostdata->pScript;
1457 /* now perform all the writebacks and invalidates */
1458 dma_cache_sync(hostdata->msgout, count, DMA_TO_DEVICE);
1459 dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
1461 dma_cache_sync(SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1462 dma_cache_sync(hostdata->status, 1, DMA_FROM_DEVICE);
1464 /* set the synchronous period/offset */
1465 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1466 SCp->device->host, SXFER_REG);
1467 NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1468 NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1474 NCR_700_intr(int irq, void *dev_id, struct pt_regs *regs)
1476 struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1477 struct NCR_700_Host_Parameters *hostdata =
1478 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1480 __u32 resume_offset = 0;
1481 __u8 pun = 0xff, lun = 0xff;
1482 unsigned long flags;
1485 /* Use the host lock to serialise access to the 53c700
1486 * hardware. Note: In future, we may need to take the queue
1487 * lock to enter the done routines. When that happens, we
1488 * need to ensure that for this driver, the host lock and the
1489 * queue lock point to the same thing. */
1490 spin_lock_irqsave(host->host_lock, flags);
1491 if((istat = NCR_700_readb(host, ISTAT_REG))
1492 & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1494 __u8 sstat0 = 0, dstat = 0;
1496 struct scsi_cmnd *SCp = hostdata->cmd;
1497 enum NCR_700_Host_State state;
1500 state = hostdata->state;
1501 SCp = hostdata->cmd;
1503 if(istat & SCSI_INT_PENDING) {
1506 sstat0 = NCR_700_readb(host, SSTAT0_REG);
1509 if(istat & DMA_INT_PENDING) {
1512 dstat = NCR_700_readb(host, DSTAT_REG);
1515 dsps = NCR_700_readl(host, DSPS_REG);
1516 dsp = NCR_700_readl(host, DSP_REG);
1518 DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1519 host->host_no, istat, sstat0, dstat,
1520 (dsp - (__u32)(hostdata->pScript))/4,
1524 pun = SCp->device->id;
1525 lun = SCp->device->lun;
1528 if(sstat0 & SCSI_RESET_DETECTED) {
1529 struct scsi_device *SDp;
1532 hostdata->state = NCR_700_HOST_BUSY;
1534 printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1535 host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1537 scsi_report_bus_reset(host, 0);
1539 /* clear all the negotiated parameters */
1540 __shost_for_each_device(SDp, host)
1541 SDp->hostdata = NULL;
1543 /* clear all the slots and their pending commands */
1544 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1545 struct scsi_cmnd *SCp;
1546 struct NCR_700_command_slot *slot =
1547 &hostdata->slots[i];
1549 if(slot->state == NCR_700_SLOT_FREE)
1553 printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1555 free_slot(slot, hostdata);
1556 SCp->host_scribble = NULL;
1557 NCR_700_set_depth(SCp->device, 0);
1558 /* NOTE: deadlock potential here: we
1559 * rely on mid-layer guarantees that
1560 * scsi_done won't try to issue the
1561 * command again otherwise we'll
1563 * hostdata->state_lock */
1564 SCp->result = DID_RESET << 16;
1565 SCp->scsi_done(SCp);
1568 NCR_700_chip_setup(host);
1570 hostdata->state = NCR_700_HOST_FREE;
1571 hostdata->cmd = NULL;
1572 /* signal back if this was an eh induced reset */
1573 if(hostdata->eh_complete != NULL)
1574 complete(hostdata->eh_complete);
1576 } else if(sstat0 & SELECTION_TIMEOUT) {
1577 DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1578 host->host_no, pun, lun));
1579 NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1580 } else if(sstat0 & PHASE_MISMATCH) {
1581 struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1582 (struct NCR_700_command_slot *)SCp->host_scribble;
1584 if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1585 /* It wants to reply to some part of
1587 #ifdef NCR_700_DEBUG
1588 __u32 temp = NCR_700_readl(host, TEMP_REG);
1589 int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1590 printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1592 resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1593 } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1594 dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1595 int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1596 int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1597 int residual = NCR_700_data_residual(host);
1599 #ifdef NCR_700_DEBUG
1600 __u32 naddr = NCR_700_readl(host, DNAD_REG);
1602 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1603 host->host_no, pun, lun,
1604 SGcount, data_transfer);
1605 scsi_print_command(SCp);
1607 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1608 host->host_no, pun, lun,
1609 SGcount, data_transfer, residual);
1612 data_transfer += residual;
1614 if(data_transfer != 0) {
1620 count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1621 DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1622 slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1623 slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1624 pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1625 pAddr += (count - data_transfer);
1626 #ifdef NCR_700_DEBUG
1627 if(pAddr != naddr) {
1628 printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1631 slot->SG[SGcount].pAddr = bS_to_host(pAddr);
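/* e.g. (illustrative numbers) if the interrupted move was for
 * count == 4096 bytes and data_transfer (bytes still outstanding,
 * including the fifo residual) works out to 1024, the instruction is
 * rewritten to move the remaining 1024 bytes and pAddr is advanced by
 * the 3072 bytes that did complete */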
1633 /* set the executed moves to nops */
1634 for(i=0; i<SGcount; i++) {
1635 slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1636 slot->SG[i].pAddr = 0;
1638 dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1639 /* and pretend we disconnected after
1640 * the command phase */
1641 resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1642 /* make sure all the data is flushed */
1643 NCR_700_flush_fifo(host);
1645 __u8 sbcl = NCR_700_readb(host, SBCL_REG);
1646 printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1647 host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1648 NCR_700_internal_bus_reset(host);
1651 } else if(sstat0 & SCSI_GROSS_ERROR) {
1652 printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1653 host->host_no, pun, lun);
1654 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1655 } else if(sstat0 & PARITY_ERROR) {
1656 printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1657 host->host_no, pun, lun);
1658 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1659 } else if(dstat & SCRIPT_INT_RECEIVED) {
1660 DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1661 host->host_no, pun, lun));
1662 resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1663 } else if(dstat & (ILGL_INST_DETECTED)) {
1664 printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1665 " Please email James.Bottomley@HansenPartnership.com with the details\n",
1666 host->host_no, pun, lun,
1667 dsp, dsp - hostdata->pScript);
1668 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1669 } else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1670 printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1671 host->host_no, pun, lun, dstat);
1672 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1676 /* NOTE: selection interrupt processing MUST occur
1677 * after script interrupt processing to correctly cope
1678 * with the case where we process a disconnect and
1679 * then get reselected before we process the
1681 if(sstat0 & SELECTED) {
1682 /* FIXME: It currently takes at least FOUR
1683 * interrupts to complete a command that
1684 * disconnects: one for the disconnect, one
1685 * for the reselection, one to get the
1686 * reselection data and one to complete the
1687 * command. If we guess the reselected
1688 * command here and prepare it, we only need
1689 * to get a reselection data interrupt if we
1690 * guessed wrongly. Since the interrupt
1691 * overhead is much greater than the command
1692 * setup, this would be an efficient
1693 * optimisation particularly as we probably
1694 * only have one outstanding command on a
1695 * target most of the time */
1697 resume_offset = process_selection(host, dsp);
1704 if(hostdata->state != NCR_700_HOST_BUSY) {
1705 printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1706 host->host_no, resume_offset, resume_offset - hostdata->pScript);
1707 hostdata->state = NCR_700_HOST_BUSY;
1710 DEBUG(("Attempting to resume at %x\n", resume_offset));
1711 NCR_700_clear_fifo(host);
1712 NCR_700_writel(resume_offset, host, DSP_REG);
1714 /* There is probably a technical no-no about this: If we're a
1715 * shared interrupt and we got this interrupt because the
1716 * other device needs servicing not us, we're still going to
1717 * check our queued commands here---of course, there shouldn't
1718 * be any outstanding.... */
1719 if(hostdata->state == NCR_700_HOST_FREE) {
1722 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1723 /* fairness: always run the queue from the last
1724 * position we left off */
1725 int j = (i + hostdata->saved_slot_position)
1726 % NCR_700_COMMAND_SLOTS_PER_HOST;
1728 if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1730 if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1731 DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1732 host->host_no, &hostdata->slots[j],
1733 hostdata->slots[j].cmnd));
1734 hostdata->saved_slot_position = j + 1;
1741 spin_unlock_irqrestore(host->host_lock, flags);
1742 return IRQ_RETVAL(handled);
1746 NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1748 struct NCR_700_Host_Parameters *hostdata =
1749 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1751 enum dma_data_direction direction;
1752 struct NCR_700_command_slot *slot;
1754 if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1755 /* We're over our allocation; this should never happen
1756 * since we report the max allocation to the mid layer */
1757 printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1760 /* check for untagged commands. We cannot have any outstanding
1761 * commands if we accept them. Commands could be untagged because:
1763 * - The tag negotiated bitmap is clear
1764 * - The blk layer sent an untagged command
1766 if(NCR_700_get_depth(SCp->device) != 0
1767 && (!(hostdata->tag_negotiated & (1<<SCp->device->id))
1768 || !blk_rq_tagged(SCp->request))) {
1769 DEBUG((KERN_ERR "scsi%d (%d:%d) has non zero depth %d\n",
1770 SCp->device->host->host_no, SCp->device->id, SCp->device->lun,
1771 NCR_700_get_depth(SCp->device)));
1772 return SCSI_MLQUEUE_DEVICE_BUSY;
1774 if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1775 DEBUG((KERN_ERR "scsi%d (%d:%d) has max tag depth %d\n",
1776 SCp->device->host->host_no, SCp->device->id, SCp->device->lun,
1777 NCR_700_get_depth(SCp->device)));
1778 return SCSI_MLQUEUE_DEVICE_BUSY;
1780 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1782 /* begin the command here */
1783 /* no need to check for NULL; the test on command_slot_count above
1784 * ensures a slot is free */
1785 slot = find_empty_slot(hostdata);
1789 SCp->scsi_done = done;
1790 SCp->host_scribble = (unsigned char *)slot;
1791 SCp->SCp.ptr = NULL;
1792 SCp->SCp.buffer = NULL;
1794 #ifdef NCR_700_DEBUG
1795 printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1796 scsi_print_command(SCp);
	if(blk_rq_tagged(SCp->request)
	   && (hostdata->tag_negotiated &(1<<SCp->device->id)) == 0
	   && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
		printk(KERN_ERR "scsi%d: (%d:%d) Enabling Tag Command Queuing\n", SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
		hostdata->tag_negotiated |= (1<<SCp->device->id);
		NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
	}
	/* here we may have to process an untagged command.  The gate
	 * above ensures that this will be the only one outstanding,
	 * so clear the tag negotiated bit.
	 *
	 * FIXME: This will royally screw up on multiple LUN devices */
	if(!blk_rq_tagged(SCp->request)
	   && (hostdata->tag_negotiated &(1<<SCp->device->id))) {
		printk(KERN_INFO "scsi%d: (%d:%d) Disabling Tag Command Queuing\n", SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
		hostdata->tag_negotiated &= ~(1<<SCp->device->id);
	}
	if((hostdata->tag_negotiated &(1<<SCp->device->id))
	   && scsi_get_tag_type(SCp->device)) {
		slot->tag = SCp->request->tag;
		DEBUG(("53c700 %d:%d:%d, sending out tag %d, slot %p\n",
		       SCp->device->host->host_no, SCp->device->id, SCp->device->lun, slot->tag,
		       slot));
	} else {
		slot->tag = SCSI_NO_TAG;
		/* must populate current_cmnd for scsi_find_tag to work */
		SCp->device->current_cmnd = SCp;
	}
	/* sanity check: some of the commands generated by the mid-layer
	 * have an eccentric idea of their sc_data_direction */
	if(!SCp->use_sg && !SCp->request_bufflen
	   && SCp->sc_data_direction != DMA_NONE) {
#ifdef NCR_700_DEBUG
		printk("53c700: Command");
		scsi_print_command(SCp);
		printk("Has wrong data direction %d\n", SCp->sc_data_direction);
#endif
		SCp->sc_data_direction = DMA_NONE;
	}

	switch (SCp->cmnd[0]) {
	case REQUEST_SENSE:
		/* clear the internal sense magic */
		SCp->cmnd[6] = 0;
		/* fall through */
	default:
		/* OK, get it from the command */
		switch(SCp->sc_data_direction) {
		case DMA_BIDIRECTIONAL:
		default:
			printk(KERN_ERR "53c700: Unknown command for data direction ");
			scsi_print_command(SCp);
			move_ins = 0;
			break;
		case DMA_NONE:
			move_ins = 0;
			break;
		case DMA_FROM_DEVICE:
			move_ins = SCRIPT_MOVE_DATA_IN;
			break;
		case DMA_TO_DEVICE:
			move_ins = SCRIPT_MOVE_DATA_OUT;
			break;
		}
	}

	/* now build the scatter gather list */
	direction = SCp->sc_data_direction;
	if(move_ins != 0) {
		int i;
		int sg_count;
		dma_addr_t vPtr = 0;
		__u32 count = 0;

		if(SCp->use_sg) {
			sg_count = dma_map_sg(hostdata->dev, SCp->buffer,
					      SCp->use_sg, direction);
		} else {
			vPtr = dma_map_single(hostdata->dev,
					      SCp->request_buffer,
					      SCp->request_bufflen,
					      direction);
			count = SCp->request_bufflen;
			slot->dma_handle = vPtr;
			sg_count = 1;
		}
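		/* move_ins now holds the bare SCRIPTS block-move opcode
		 * for the data phase (data in or data out); each element
		 * of the scatter list below ORs its byte count into the
		 * low 24 bits of the instruction word. */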
		for(i = 0; i < sg_count; i++) {

			if(SCp->use_sg) {
				struct scatterlist *sg = SCp->buffer;

				vPtr = sg_dma_address(&sg[i]);
				count = sg_dma_len(&sg[i]);
			}

			slot->SG[i].ins = bS_to_host(move_ins | count);
			DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
			       i, count, slot->SG[i].ins, (unsigned long)vPtr));
			slot->SG[i].pAddr = bS_to_host(vPtr);
		}
		slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
		slot->SG[i].pAddr = 0;
		dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
		DEBUG((" SETTING %08lx to %x\n",
		       (&slot->pSG[i].ins),
		       slot->SG[i].ins));
	}
	slot->resume_offset = 0;
	slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
				    sizeof(SCp->cmnd), DMA_TO_DEVICE);
	NCR_700_start_command(SCp);
	return 0;
}
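/* eh_abort_handler.  The driver makes no attempt to abort an
 * individual command: it reports SUCCESS when nothing is actually
 * outstanding, or, for the error handler's TEST UNIT READY probe,
 * forces matters with a bus reset and returns FAILED so that
 * recovery escalates. */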
STATIC int
NCR_700_abort(struct scsi_cmnd * SCp)
{
	struct NCR_700_command_slot *slot;

	printk(KERN_INFO "scsi%d (%d:%d) New error handler wants to abort command\n\t",
	       SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
	scsi_print_command(SCp);

	slot = (struct NCR_700_command_slot *)SCp->host_scribble;

	if(slot == NULL)
		/* no outstanding command to abort */
		return SUCCESS;
	if(SCp->cmnd[0] == TEST_UNIT_READY) {
		/* FIXME: This is because of a problem in the new
		 * error handler.  When it is in error recovery, it
		 * will send a TUR to a device it thinks may still be
		 * showing a problem.  If the TUR isn't responded to,
		 * it will abort it and mark the device off line.
		 * Unfortunately, it does no other error recovery, so
		 * this would leave us with an outstanding command
		 * occupying a slot.  Rather than allow this to
		 * happen, we issue a bus reset to force all
		 * outstanding commands to terminate here. */
		NCR_700_internal_bus_reset(SCp->device->host);
		/* still drop through and return failed */
	}
	return FAILED;
}
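/* eh_bus_reset_handler.  Fire a SCSI bus reset through the chip and
 * then sleep on a completion stored in hostdata->eh_complete, which
 * the reset handling in the interrupt path is expected to signal once
 * the chip reports the reset; on fast hosts the failing device's
 * transport parameters are then revalidated. */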
STATIC int
NCR_700_bus_reset(struct scsi_cmnd * SCp)
{
	DECLARE_COMPLETION(complete);
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];

	printk(KERN_INFO "scsi%d (%d:%d) New error handler wants BUS reset, cmd %p\n\t",
	       SCp->device->host->host_no, SCp->device->id, SCp->device->lun, SCp);
	scsi_print_command(SCp);
	/* In theory, eh_complete should always be null because the
	 * eh is single threaded, but just in case we're handling a
	 * reset via sg or something */
	spin_lock_irq(SCp->device->host->host_lock);
	while (hostdata->eh_complete != NULL) {
		spin_unlock_irq(SCp->device->host->host_lock);
		msleep_interruptible(100);
		spin_lock_irq(SCp->device->host->host_lock);
	}

	hostdata->eh_complete = &complete;
	NCR_700_internal_bus_reset(SCp->device->host);

	spin_unlock_irq(SCp->device->host->host_lock);
	wait_for_completion(&complete);
	spin_lock_irq(SCp->device->host->host_lock);

	hostdata->eh_complete = NULL;
	/* Revalidate the transport parameters of the failing device */
	if(hostdata->fast)
		spi_schedule_dv_device(SCp->device);

	spin_unlock_irq(SCp->device->host->host_lock);
	return SUCCESS;
}
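/* eh_host_reset_handler: the last resort.  Reset both the SCSI bus
 * and the chip itself under the host lock. */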
STATIC int
NCR_700_host_reset(struct scsi_cmnd * SCp)
{
	printk(KERN_INFO "scsi%d (%d:%d) New error handler wants HOST reset\n\t",
	       SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
	scsi_print_command(SCp);

	spin_lock_irq(SCp->device->host->host_lock);

	NCR_700_internal_bus_reset(SCp->device->host);
	NCR_700_chip_reset(SCp->device->host);

	spin_unlock_irq(SCp->device->host->host_lock);

	return SUCCESS;
}
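/* SPI transport class callbacks.  Writing to the period/offset
 * attributes in sysfs lands here; the requested value is clamped to
 * what the chip can do and the "negotiated" flags are cleared so that
 * the next command renegotiates synchronous transfer with the
 * target. */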
STATIC void
NCR_700_set_period(struct scsi_target *STp, int period)
{
	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];

	if(!hostdata->fast)
		return;

	if(period < hostdata->min_period)
		period = hostdata->min_period;

	spi_period(STp) = period;
	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
}
STATIC void
NCR_700_set_offset(struct scsi_target *STp, int offset)
{
	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
	int max_offset = hostdata->chip710
		? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;

	if(!hostdata->fast)
		return;

	if(offset > max_offset)
		offset = max_offset;

	/* if we're currently async, make sure the period is reasonable */
	if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
				    spi_period(STp) > 0xff))
		spi_period(STp) = hostdata->min_period;

	spi_offset(STp) = offset;
	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
}
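/* slave_configure: called by the mid-layer once per scsi_device after
 * INQUIRY data is available.  Enable tagged queueing if the device
 * claims to support it, otherwise fall back to the host's default
 * depth, and on fast hosts run domain validation to find the correct
 * synchronous period and offset. */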
STATIC int
NCR_700_slave_configure(struct scsi_device *SDp)
{
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];

	/* to do here: allocate memory; build a queue_full list */
	if(SDp->tagged_supported) {
		scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
		scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS);
		NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
	} else {
		/* initialise to default depth */
		scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
	}

	if(hostdata->fast) {
		/* Find the correct offset and period via domain validation */
		if (!spi_initial_dv(SDp->sdev_target))
			spi_dv_device(SDp);
	} else {
		spi_offset(SDp->sdev_target) = 0;
		spi_period(SDp->sdev_target) = 0;
	}

	return 0;
}

STATIC void
NCR_700_slave_destroy(struct scsi_device *SDp)
{
	/* to do here: deallocate memory */
}
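/* User requested queue depth changes come through here: clamp to the
 * driver maximum and let scsi_adjust_queue_depth() do the rest. */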
static int
NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
{
	if (depth > NCR_700_MAX_TAGS)
		depth = NCR_700_MAX_TAGS;
	scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
	return depth;
}
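/* Switch a device between tagged and untagged operation.  Because
 * tag_negotiated is tracked per target and the tag algorithm cannot
 * cope with tagged and untagged commands in flight at the same time,
 * the target is quiesced around the switch and tag negotiation is
 * restarted when queueing is turned back on. */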
static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
{
	int change_tag = ((tag_type == 0 && scsi_get_tag_type(SDp) != 0)
			  || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];

	scsi_set_tag_type(SDp, tag_type);

	/* We have a global (per target) flag to track whether TCQ is
	 * enabled, so we'll be turning it off for the entire target here.
	 * our tag algorithm will fail if we mix tagged and untagged commands,
	 * so quiesce the device before doing this */
	if (change_tag)
		scsi_target_quiesce(SDp->sdev_target);

	if (!tag_type) {
		/* shift back to the default unqueued number of commands
		 * (the user can still raise this) */
		scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun);
		hostdata->tag_negotiated &= ~(1 << SDp->id);
	} else {
		/* Here, we cleared the negotiation flag above, so this
		 * will force the driver to renegotiate */
		scsi_activate_tcq(SDp, SDp->queue_depth);
		if (change_tag)
			NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
	}
	if (change_tag)
		scsi_target_resume(SDp->sdev_target);

	return tag_type;
}
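/* Per-device sysfs attribute exposing the current number of
 * outstanding commands (the driver's depth counter) as a read-only
 * "active_tags" file. */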
static ssize_t
NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *SDp = to_scsi_device(dev);
	return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
}

static struct device_attribute NCR_700_active_tags_attr = {
	.attr = { .name = "active_tags", .mode = S_IRUGO },
	.show = NCR_700_show_active_tags,
};
STATIC struct device_attribute *NCR_700_dev_attrs[] = {
	&NCR_700_active_tags_attr,
	NULL,
};
EXPORT_SYMBOL(NCR_700_detect);
EXPORT_SYMBOL(NCR_700_release);
EXPORT_SYMBOL(NCR_700_intr);
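/* Hook into the SPI transport class.  Only the period and offset
 * setters are provided; these chips are narrow and do not do wide or
 * DT transfers, so there is nothing else to control. */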
static struct spi_function_template NCR_700_transport_functions = {
	.set_period	= NCR_700_set_period,
	.show_period	= 1,
	.set_offset	= NCR_700_set_offset,
	.show_offset	= 1,
};
static int __init NCR_700_init(void)
{
	NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
	if(!NCR_700_transport_template)
		return -ENODEV;
	return 0;
}

static void __exit NCR_700_exit(void)
{
	spi_release_transport(NCR_700_transport_template);
}

module_init(NCR_700_init);
module_exit(NCR_700_exit);