2 * sata_nv.c - NVIDIA nForce SATA
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
32 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
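 * MCP51/MCP55 controllers do not use ADMA; they are instead driven
 * through a software NCQ (SWNCQ) scheme implemented in this file.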
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pci.h>
42 #include <linux/init.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
51 #define DRV_NAME "sata_nv"
52 #define DRV_VERSION "3.5"
54 #define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
63 NV_PORT0_SCR_REG_OFFSET = 0x00,
64 NV_PORT1_SCR_REG_OFFSET = 0x40,
66 /* INT_STATUS/ENABLE */
69 NV_INT_STATUS_CK804 = 0x440,
70 NV_INT_ENABLE_CK804 = 0x441,
72 /* INT_STATUS/ENABLE bits */
76 NV_INT_REMOVED = 0x08,
78 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
81 NV_INT_MASK = NV_INT_DEV |
82 NV_INT_ADDED | NV_INT_REMOVED,
86 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
88 // For PCI config register 20
89 NV_MCP_SATA_CFG_20 = 0x50,
90 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
91 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
92 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
93 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
94 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
96 NV_ADMA_MAX_CPBS = 32,
99 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
101 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
102 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
104 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
106 /* BAR5 offset to ADMA general registers */
108 NV_ADMA_GEN_CTL = 0x00,
109 NV_ADMA_NOTIFIER_CLEAR = 0x30,
111 /* BAR5 offset to ADMA ports */
112 NV_ADMA_PORT = 0x480,
114 /* size of ADMA port register space */
115 NV_ADMA_PORT_SIZE = 0x100,
117 /* ADMA port registers */
119 NV_ADMA_CPB_COUNT = 0x42,
120 NV_ADMA_NEXT_CPB_IDX = 0x43,
122 NV_ADMA_CPB_BASE_LOW = 0x48,
123 NV_ADMA_CPB_BASE_HIGH = 0x4C,
124 NV_ADMA_APPEND = 0x50,
125 NV_ADMA_NOTIFIER = 0x68,
126 NV_ADMA_NOTIFIER_ERROR = 0x6C,
128 /* NV_ADMA_CTL register bits */
129 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
130 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
131 NV_ADMA_CTL_GO = (1 << 7),
132 NV_ADMA_CTL_AIEN = (1 << 8),
133 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
134 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
136 /* CPB response flag bits */
137 NV_CPB_RESP_DONE = (1 << 0),
138 NV_CPB_RESP_ATA_ERR = (1 << 3),
139 NV_CPB_RESP_CMD_ERR = (1 << 4),
140 NV_CPB_RESP_CPB_ERR = (1 << 7),
142 /* CPB control flag bits */
143 NV_CPB_CTL_CPB_VALID = (1 << 0),
144 NV_CPB_CTL_QUEUE = (1 << 1),
145 NV_CPB_CTL_APRD_VALID = (1 << 2),
146 NV_CPB_CTL_IEN = (1 << 3),
147 NV_CPB_CTL_FPDMA = (1 << 4),
150 NV_APRD_WRITE = (1 << 1),
151 NV_APRD_END = (1 << 2),
152 NV_APRD_CONT = (1 << 3),
154 /* NV_ADMA_STAT flags */
155 NV_ADMA_STAT_TIMEOUT = (1 << 0),
156 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
157 NV_ADMA_STAT_HOTPLUG = (1 << 2),
158 NV_ADMA_STAT_CPBERR = (1 << 4),
159 NV_ADMA_STAT_SERROR = (1 << 5),
160 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
161 NV_ADMA_STAT_IDLE = (1 << 8),
162 NV_ADMA_STAT_LEGACY = (1 << 9),
163 NV_ADMA_STAT_STOPPED = (1 << 10),
164 NV_ADMA_STAT_DONE = (1 << 12),
165 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
166 NV_ADMA_STAT_TIMEOUT,
169 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
170 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
172 /* MCP55 reg offset */
173 NV_CTL_MCP55 = 0x400,
174 NV_INT_STATUS_MCP55 = 0x440,
175 NV_INT_ENABLE_MCP55 = 0x444,
176 NV_NCQ_REG_MCP55 = 0x448,
179 NV_INT_ALL_MCP55 = 0xffff,
180 NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */
181 NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd,
183 /* SWNCQ ENABLE BITS*/
184 NV_CTL_PRI_SWNCQ = 0x02,
185 NV_CTL_SEC_SWNCQ = 0x04,
187 /* SW NCQ status bits*/
188 NV_SWNCQ_IRQ_DEV = (1 << 0),
189 NV_SWNCQ_IRQ_PM = (1 << 1),
190 NV_SWNCQ_IRQ_ADDED = (1 << 2),
191 NV_SWNCQ_IRQ_REMOVED = (1 << 3),
193 NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
194 NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
195 NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
196 NV_SWNCQ_IRQ_DMASETUP = (1 << 7),
198 NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
199 NV_SWNCQ_IRQ_REMOVED,
203 /* ADMA Physical Region Descriptor - one SG segment */
212 enum nv_adma_regbits {
213 CMDEND = (1 << 15), /* end of command list */
214 WNB = (1 << 14), /* wait-not-BSY */
215 IGN = (1 << 13), /* ignore this entry */
216 CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
217 DA2 = (1 << (2 + 8)),
218 DA1 = (1 << (1 + 8)),
219 DA0 = (1 << (0 + 8)),
222 /* ADMA Command Parameter Block
223 The first 5 SG segments are stored inside the Command Parameter Block itself.
224 If there are more than 5 segments the remainder are stored in a separate
225 memory area indicated by next_aprd. */
227 u8 resp_flags; /* 0 */
228 u8 reserved1; /* 1 */
229 u8 ctl_flags; /* 2 */
230 /* len is length of taskfile in 64 bit words */
233 u8 next_cpb_idx; /* 5 */
234 __le16 reserved2; /* 6-7 */
235 __le16 tf[12]; /* 8-31 */
236 struct nv_adma_prd aprd[5]; /* 32-111 */
237 __le64 next_aprd; /* 112-119 */
238 __le64 reserved3; /* 120-127 */
242 struct nv_adma_port_priv {
243 struct nv_adma_cpb *cpb;
245 struct nv_adma_prd *aprd;
247 void __iomem *ctl_block;
248 void __iomem *gen_block;
249 void __iomem *notifier_clear_block;
255 struct nv_host_priv {
263 unsigned int tag[ATA_MAX_QUEUE];
266 enum ncq_saw_flag_list {
267 ncq_saw_d2h = (1U << 0),
268 ncq_saw_dmas = (1U << 1),
269 ncq_saw_sdb = (1U << 2),
270 ncq_saw_backout = (1U << 3),
273 struct nv_swncq_port_priv {
274 struct ata_prd *prd; /* our SG list */
275 dma_addr_t prd_dma; /* and its DMA mapping */
276 void __iomem *sactive_block;
277 void __iomem *irq_block;
278 void __iomem *tag_block;
281 unsigned int last_issue_tag;
283 /* fifo circular queue to store deferral command */
284 struct defer_queue defer_queue;
286 /* for NCQ interrupt analysis */
291 unsigned int ncq_flags;
295 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
297 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
299 static int nv_pci_device_resume(struct pci_dev *pdev);
301 static void nv_ck804_host_stop(struct ata_host *host);
302 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
305 static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
306 static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
308 static void nv_nf2_freeze(struct ata_port *ap);
309 static void nv_nf2_thaw(struct ata_port *ap);
310 static void nv_ck804_freeze(struct ata_port *ap);
311 static void nv_ck804_thaw(struct ata_port *ap);
312 static int nv_adma_slave_config(struct scsi_device *sdev);
313 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
314 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
315 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
316 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
317 static void nv_adma_irq_clear(struct ata_port *ap);
318 static int nv_adma_port_start(struct ata_port *ap);
319 static void nv_adma_port_stop(struct ata_port *ap);
321 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
322 static int nv_adma_port_resume(struct ata_port *ap);
324 static void nv_adma_freeze(struct ata_port *ap);
325 static void nv_adma_thaw(struct ata_port *ap);
326 static void nv_adma_error_handler(struct ata_port *ap);
327 static void nv_adma_host_stop(struct ata_host *host);
328 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
329 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
331 static void nv_mcp55_thaw(struct ata_port *ap);
332 static void nv_mcp55_freeze(struct ata_port *ap);
333 static void nv_swncq_error_handler(struct ata_port *ap);
334 static int nv_swncq_slave_config(struct scsi_device *sdev);
335 static int nv_swncq_port_start(struct ata_port *ap);
336 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
337 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
338 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
339 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
340 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
342 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
343 static int nv_swncq_port_resume(struct ata_port *ap);
350 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
356 static const struct pci_device_id nv_pci_tbl[] = {
357 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
358 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
359 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
360 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
361 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
362 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
363 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
364 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
365 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
366 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
367 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
368 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
369 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
370 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
372 { } /* terminate list */
375 static struct pci_driver nv_pci_driver = {
377 .id_table = nv_pci_tbl,
378 .probe = nv_init_one,
380 .suspend = ata_pci_device_suspend,
381 .resume = nv_pci_device_resume,
383 .remove = ata_pci_remove_one,
386 static struct scsi_host_template nv_sht = {
387 ATA_BMDMA_SHT(DRV_NAME),
390 static struct scsi_host_template nv_adma_sht = {
391 ATA_NCQ_SHT(DRV_NAME),
392 .can_queue = NV_ADMA_MAX_CPBS,
393 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
394 .dma_boundary = NV_ADMA_DMA_BOUNDARY,
395 .slave_configure = nv_adma_slave_config,
398 static struct scsi_host_template nv_swncq_sht = {
399 ATA_NCQ_SHT(DRV_NAME),
400 .can_queue = ATA_MAX_QUEUE,
401 .sg_tablesize = LIBATA_MAX_PRD,
402 .dma_boundary = ATA_DMA_BOUNDARY,
403 .slave_configure = nv_swncq_slave_config,
406 static struct ata_port_operations nv_generic_ops = {
407 .inherits = &ata_bmdma_port_ops,
408 .hardreset = ATA_OP_NULL,
409 .scr_read = nv_scr_read,
410 .scr_write = nv_scr_write,
413 static struct ata_port_operations nv_nf2_ops = {
414 .inherits = &nv_generic_ops,
415 .freeze = nv_nf2_freeze,
419 static struct ata_port_operations nv_ck804_ops = {
420 .inherits = &nv_generic_ops,
421 .freeze = nv_ck804_freeze,
422 .thaw = nv_ck804_thaw,
423 .host_stop = nv_ck804_host_stop,
426 static struct ata_port_operations nv_adma_ops = {
427 .inherits = &nv_generic_ops,
429 .check_atapi_dma = nv_adma_check_atapi_dma,
430 .sff_tf_read = nv_adma_tf_read,
431 .qc_defer = ata_std_qc_defer,
432 .qc_prep = nv_adma_qc_prep,
433 .qc_issue = nv_adma_qc_issue,
434 .sff_irq_clear = nv_adma_irq_clear,
436 .freeze = nv_adma_freeze,
437 .thaw = nv_adma_thaw,
438 .error_handler = nv_adma_error_handler,
439 .post_internal_cmd = nv_adma_post_internal_cmd,
441 .port_start = nv_adma_port_start,
442 .port_stop = nv_adma_port_stop,
444 .port_suspend = nv_adma_port_suspend,
445 .port_resume = nv_adma_port_resume,
447 .host_stop = nv_adma_host_stop,
450 static struct ata_port_operations nv_swncq_ops = {
451 .inherits = &nv_generic_ops,
453 .qc_defer = ata_std_qc_defer,
454 .qc_prep = nv_swncq_qc_prep,
455 .qc_issue = nv_swncq_qc_issue,
457 .freeze = nv_mcp55_freeze,
458 .thaw = nv_mcp55_thaw,
459 .error_handler = nv_swncq_error_handler,
462 .port_suspend = nv_swncq_port_suspend,
463 .port_resume = nv_swncq_port_resume,
465 .port_start = nv_swncq_port_start,
469 irq_handler_t irq_handler;
470 struct scsi_host_template *sht;
473 #define NV_PI_PRIV(_irq_handler, _sht) \
474 &(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
476 static const struct ata_port_info nv_port_info[] = {
479 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
480 .pio_mask = NV_PIO_MASK,
481 .mwdma_mask = NV_MWDMA_MASK,
482 .udma_mask = NV_UDMA_MASK,
483 .port_ops = &nv_generic_ops,
484 .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
488 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
489 .pio_mask = NV_PIO_MASK,
490 .mwdma_mask = NV_MWDMA_MASK,
491 .udma_mask = NV_UDMA_MASK,
492 .port_ops = &nv_nf2_ops,
493 .private_data = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
497 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
498 .pio_mask = NV_PIO_MASK,
499 .mwdma_mask = NV_MWDMA_MASK,
500 .udma_mask = NV_UDMA_MASK,
501 .port_ops = &nv_ck804_ops,
502 .private_data = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
506 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
507 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
508 .pio_mask = NV_PIO_MASK,
509 .mwdma_mask = NV_MWDMA_MASK,
510 .udma_mask = NV_UDMA_MASK,
511 .port_ops = &nv_adma_ops,
512 .private_data = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
516 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
518 .pio_mask = NV_PIO_MASK,
519 .mwdma_mask = NV_MWDMA_MASK,
520 .udma_mask = NV_UDMA_MASK,
521 .port_ops = &nv_swncq_ops,
522 .private_data = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
526 MODULE_AUTHOR("NVIDIA");
527 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
528 MODULE_LICENSE("GPL");
529 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
530 MODULE_VERSION(DRV_VERSION);
532 static int adma_enabled;
533 static int swncq_enabled = 1;
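/* Switch the port out of ADMA mode and back to legacy register mode:
 * wait for the ADMA engine to go idle, clear the GO bit, then wait for
 * the LEGACY status flag to assert.
 */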
535 static void nv_adma_register_mode(struct ata_port *ap)
537 struct nv_adma_port_priv *pp = ap->private_data;
538 void __iomem *mmio = pp->ctl_block;
542 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
545 status = readw(mmio + NV_ADMA_STAT);
546 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
548 status = readw(mmio + NV_ADMA_STAT);
552 ata_port_printk(ap, KERN_WARNING,
553 "timeout waiting for ADMA IDLE, stat=0x%hx\n",
556 tmp = readw(mmio + NV_ADMA_CTL);
557 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
560 status = readw(mmio + NV_ADMA_STAT);
561 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
563 status = readw(mmio + NV_ADMA_STAT);
567 ata_port_printk(ap, KERN_WARNING,
568 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
571 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
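/* Switch the port from register mode into ADMA mode: set the GO bit and
 * wait for the LEGACY flag to clear and the engine to report IDLE.
 */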
574 static void nv_adma_mode(struct ata_port *ap)
576 struct nv_adma_port_priv *pp = ap->private_data;
577 void __iomem *mmio = pp->ctl_block;
581 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
584 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
586 tmp = readw(mmio + NV_ADMA_CTL);
587 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
589 status = readw(mmio + NV_ADMA_STAT);
590 while (((status & NV_ADMA_STAT_LEGACY) ||
591 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
593 status = readw(mmio + NV_ADMA_STAT);
597 ata_port_printk(ap, KERN_WARNING,
598 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
601 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
604 static int nv_adma_slave_config(struct scsi_device *sdev)
606 struct ata_port *ap = ata_shost_to_port(sdev->host);
607 struct nv_adma_port_priv *pp = ap->private_data;
608 struct nv_adma_port_priv *port0, *port1;
609 struct scsi_device *sdev0, *sdev1;
610 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
611 unsigned long segment_boundary, flags;
612 unsigned short sg_tablesize;
615 u32 current_reg, new_reg, config_mask;
617 rc = ata_scsi_slave_config(sdev);
619 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
620 /* Not a proper libata device, ignore */
623 spin_lock_irqsave(ap->lock, flags);
625 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
627 * NVIDIA reports that ADMA mode does not support ATAPI commands.
628 * Therefore ATAPI commands are sent through the legacy interface.
629 * However, the legacy interface only supports 32-bit DMA.
630 * Restrict DMA parameters as required by the legacy interface
631 * when an ATAPI device is connected.
633 segment_boundary = ATA_DMA_BOUNDARY;
634 /* Subtract 1 since an extra entry may be needed for padding, see
635 libata-scsi.c */
636 sg_tablesize = LIBATA_MAX_PRD - 1;
638 /* Since the legacy DMA engine is in use, we need to disable ADMA
641 nv_adma_register_mode(ap);
643 segment_boundary = NV_ADMA_DMA_BOUNDARY;
644 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
648 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
650 if (ap->port_no == 1)
651 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
652 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
654 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
655 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
658 new_reg = current_reg | config_mask;
659 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
661 new_reg = current_reg & ~config_mask;
662 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
665 if (current_reg != new_reg)
666 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
668 port0 = ap->host->ports[0]->private_data;
669 port1 = ap->host->ports[1]->private_data;
670 sdev0 = ap->host->ports[0]->link.device[0].sdev;
671 sdev1 = ap->host->ports[1]->link.device[0].sdev;
672 if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
673 (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
674 /** We have to set the DMA mask to 32-bit if either port is in
675 ATAPI mode, since they are on the same PCI device which is
676 used for DMA mapping. If we set the mask we also need to set
677 the bounce limit on both ports to ensure that the block
678 layer doesn't feed addresses that cause DMA mapping to
679 choke. If either SCSI device is not allocated yet, it's OK
680 since that port will discover its correct setting when it
682 Note: Setting 32-bit mask should not fail. */
684 blk_queue_bounce_limit(sdev0->request_queue,
687 blk_queue_bounce_limit(sdev1->request_queue,
690 pci_set_dma_mask(pdev, ATA_DMA_MASK);
692 /** This shouldn't fail as it was set to this value before */
693 pci_set_dma_mask(pdev, pp->adma_dma_mask);
695 blk_queue_bounce_limit(sdev0->request_queue,
698 blk_queue_bounce_limit(sdev1->request_queue,
702 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
703 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
704 ata_port_printk(ap, KERN_INFO,
705 "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
706 (unsigned long long)*ap->host->dev->dma_mask,
707 segment_boundary, sg_tablesize);
709 spin_unlock_irqrestore(ap->lock, flags);
714 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
716 struct nv_adma_port_priv *pp = qc->ap->private_data;
717 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
720 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
722 /* Other than when internal or pass-through commands are executed,
723 the only time this function will be called in ADMA mode will be
724 if a command fails. In the failure case we don't care about going
725 into register mode with ADMA commands pending, as the commands will
726 all shortly be aborted anyway. We assume that NCQ commands are not
727 issued via passthrough, which is the only way that switching into
728 ADMA mode could abort outstanding commands. */
729 nv_adma_register_mode(ap);
731 ata_sff_tf_read(ap, tf);
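/* Encode a taskfile as 16-bit CPB entries: register address in the high
 * byte, value in the low byte, WNB set on the first entry and CMDEND on
 * the final (command) entry; unused slots are padded with IGN.
 */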
734 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
736 unsigned int idx = 0;
738 if (tf->flags & ATA_TFLAG_ISADDR) {
739 if (tf->flags & ATA_TFLAG_LBA48) {
740 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
741 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
742 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
743 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
744 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
745 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
747 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
749 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
750 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
751 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
752 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
755 if (tf->flags & ATA_TFLAG_DEVICE)
756 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
758 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
761 cpb[idx++] = cpu_to_le16(IGN);
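/* Inspect one CPB's response flags: on any error flag (or a forced error
 * from the notifier) record EH descriptors and kick off error handling;
 * on NV_CPB_RESP_DONE complete the corresponding qc.
 */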
766 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
768 struct nv_adma_port_priv *pp = ap->private_data;
769 u8 flags = pp->cpb[cpb_num].resp_flags;
771 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
773 if (unlikely((force_err ||
774 flags & (NV_CPB_RESP_ATA_ERR |
775 NV_CPB_RESP_CMD_ERR |
776 NV_CPB_RESP_CPB_ERR)))) {
777 struct ata_eh_info *ehi = &ap->link.eh_info;
780 ata_ehi_clear_desc(ehi);
781 __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
782 if (flags & NV_CPB_RESP_ATA_ERR) {
783 ata_ehi_push_desc(ehi, "ATA error");
784 ehi->err_mask |= AC_ERR_DEV;
785 } else if (flags & NV_CPB_RESP_CMD_ERR) {
786 ata_ehi_push_desc(ehi, "CMD error");
787 ehi->err_mask |= AC_ERR_DEV;
788 } else if (flags & NV_CPB_RESP_CPB_ERR) {
789 ata_ehi_push_desc(ehi, "CPB error");
790 ehi->err_mask |= AC_ERR_SYSTEM;
793 /* notifier error, but no error in CPB flags? */
794 ata_ehi_push_desc(ehi, "unknown");
795 ehi->err_mask |= AC_ERR_OTHER;
798 /* Kill all commands. EH will determine what actually failed. */
806 if (likely(flags & NV_CPB_RESP_DONE)) {
807 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
808 VPRINTK("CPB flags done, flags=0x%x\n", flags);
810 DPRINTK("Completing qc from tag %d\n", cpb_num);
813 struct ata_eh_info *ehi = &ap->link.eh_info;
814 /* Notifier bits set without a command may indicate the drive
815 is misbehaving. Raise host state machine violation on this
816 condition. */
817 ata_port_printk(ap, KERN_ERR,
818 "notifier for tag %d with no cmd?\n",
820 ehi->err_mask |= AC_ERR_HSM;
821 ehi->action |= ATA_EH_RESET;
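/* Handle one port's legacy interrupt status: freeze on hotplug add/remove,
 * bail out if the interrupt is not ours, and otherwise hand the active qc
 * to the standard SFF interrupt handler.
 */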
829 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
831 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
833 /* freeze if hotplugged */
834 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
839 /* bail out if not our interrupt */
840 if (!(irq_stat & NV_INT_DEV))
843 /* DEV interrupt w/ no active qc? */
844 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
845 ata_sff_check_status(ap);
849 /* handle interrupt */
850 return ata_sff_host_intr(ap, qc);
853 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
855 struct ata_host *host = dev_instance;
857 u32 notifier_clears[2];
859 spin_lock(&host->lock);
861 for (i = 0; i < host->n_ports; i++) {
862 struct ata_port *ap = host->ports[i];
863 notifier_clears[i] = 0;
865 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
866 struct nv_adma_port_priv *pp = ap->private_data;
867 void __iomem *mmio = pp->ctl_block;
870 u32 notifier, notifier_error;
872 /* if ADMA is disabled, use standard ata interrupt handler */
873 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
874 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
875 >> (NV_INT_PORT_SHIFT * i);
876 handled += nv_host_intr(ap, irq_stat);
880 /* if in ATA register mode, check for standard interrupts */
881 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
882 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
883 >> (NV_INT_PORT_SHIFT * i);
884 if (ata_tag_valid(ap->link.active_tag))
885 /** NV_INT_DEV indication seems unreliable at times
886 at least in ADMA mode. Force it on always when a
887 command is active, to prevent losing interrupts. */
888 irq_stat |= NV_INT_DEV;
889 handled += nv_host_intr(ap, irq_stat);
892 notifier = readl(mmio + NV_ADMA_NOTIFIER);
893 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
894 notifier_clears[i] = notifier | notifier_error;
896 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
898 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
903 status = readw(mmio + NV_ADMA_STAT);
905 /* Clear status. Ensure the controller sees the clearing before we start
906 looking at any of the CPB statuses, so that any CPB completions after
907 this point in the handler will raise another interrupt. */
908 writew(status, mmio + NV_ADMA_STAT);
909 readw(mmio + NV_ADMA_STAT); /* flush posted write */
912 handled++; /* irq handled if we got here */
914 /* freeze if hotplugged or controller error */
915 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
916 NV_ADMA_STAT_HOTUNPLUG |
917 NV_ADMA_STAT_TIMEOUT |
918 NV_ADMA_STAT_SERROR))) {
919 struct ata_eh_info *ehi = &ap->link.eh_info;
921 ata_ehi_clear_desc(ehi);
922 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
923 if (status & NV_ADMA_STAT_TIMEOUT) {
924 ehi->err_mask |= AC_ERR_SYSTEM;
925 ata_ehi_push_desc(ehi, "timeout");
926 } else if (status & NV_ADMA_STAT_HOTPLUG) {
927 ata_ehi_hotplugged(ehi);
928 ata_ehi_push_desc(ehi, "hotplug");
929 } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
930 ata_ehi_hotplugged(ehi);
931 ata_ehi_push_desc(ehi, "hot unplug");
932 } else if (status & NV_ADMA_STAT_SERROR) {
933 /* let libata analyze SError and figure out the cause */
934 ata_ehi_push_desc(ehi, "SError");
936 ata_ehi_push_desc(ehi, "unknown");
941 if (status & (NV_ADMA_STAT_DONE |
942 NV_ADMA_STAT_CPBERR |
943 NV_ADMA_STAT_CMD_COMPLETE)) {
944 u32 check_commands = notifier_clears[i];
947 if (status & NV_ADMA_STAT_CPBERR) {
948 /* Check all active commands */
949 if (ata_tag_valid(ap->link.active_tag))
950 check_commands = 1 <<
953 check_commands = ap->
957 /** Check CPBs for completed commands */
958 while ((pos = ffs(check_commands)) && !error) {
960 error = nv_adma_check_cpb(ap, pos,
961 notifier_error & (1 << pos));
962 check_commands &= ~(1 << pos);
968 if (notifier_clears[0] || notifier_clears[1]) {
969 /* Note: Both notifier clear registers must be written
970 if either is set, even if one is zero, according to NVIDIA. */
971 struct nv_adma_port_priv *pp = host->ports[0]->private_data;
972 writel(notifier_clears[0], pp->notifier_clear_block);
973 pp = host->ports[1]->private_data;
974 writel(notifier_clears[1], pp->notifier_clear_block);
977 spin_unlock(&host->lock);
979 return IRQ_RETVAL(handled);
982 static void nv_adma_freeze(struct ata_port *ap)
984 struct nv_adma_port_priv *pp = ap->private_data;
985 void __iomem *mmio = pp->ctl_block;
990 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
993 /* clear any outstanding CK804 notifications */
994 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
995 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
997 /* Disable interrupt */
998 tmp = readw(mmio + NV_ADMA_CTL);
999 writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1000 mmio + NV_ADMA_CTL);
1001 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1004 static void nv_adma_thaw(struct ata_port *ap)
1006 struct nv_adma_port_priv *pp = ap->private_data;
1007 void __iomem *mmio = pp->ctl_block;
1012 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1015 /* Enable interrupt */
1016 tmp = readw(mmio + NV_ADMA_CTL);
1017 writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1018 mmio + NV_ADMA_CTL);
1019 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1022 static void nv_adma_irq_clear(struct ata_port *ap)
1024 struct nv_adma_port_priv *pp = ap->private_data;
1025 void __iomem *mmio = pp->ctl_block;
1026 u32 notifier_clears[2];
1028 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1029 ata_sff_irq_clear(ap);
1033 /* clear any outstanding CK804 notifications */
1034 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1035 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1037 /* clear ADMA status */
1038 writew(0xffff, mmio + NV_ADMA_STAT);
1040 /* clear notifiers - note both ports need to be written with
1041 something even though we are only clearing on one */
1042 if (ap->port_no == 0) {
1043 notifier_clears[0] = 0xFFFFFFFF;
1044 notifier_clears[1] = 0;
1046 notifier_clears[0] = 0;
1047 notifier_clears[1] = 0xFFFFFFFF;
1049 pp = ap->host->ports[0]->private_data;
1050 writel(notifier_clears[0], pp->notifier_clear_block);
1051 pp = ap->host->ports[1]->private_data;
1052 writel(notifier_clears[1], pp->notifier_clear_block);
1055 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1057 struct nv_adma_port_priv *pp = qc->ap->private_data;
1059 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1060 ata_sff_post_internal_cmd(qc);
1063 static int nv_adma_port_start(struct ata_port *ap)
1065 struct device *dev = ap->host->dev;
1066 struct nv_adma_port_priv *pp;
1071 struct pci_dev *pdev = to_pci_dev(dev);
1076 /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1077 pad buffers */
1078 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1081 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1085 rc = ata_port_start(ap);
1089 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1093 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1094 ap->port_no * NV_ADMA_PORT_SIZE;
1095 pp->ctl_block = mmio;
1096 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1097 pp->notifier_clear_block = pp->gen_block +
1098 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1100 /* Now that the legacy PRD and padding buffer are allocated we can
1101 safely raise the DMA mask to allocate the CPB/APRD table.
1102 These are allowed to fail since we store the value that ends up
1103 being used to set as the bounce limit in slave_config later if
1104 needed. */
1105 pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1106 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1107 pp->adma_dma_mask = *dev->dma_mask;
1109 mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1110 &mem_dma, GFP_KERNEL);
1113 memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1116 * First item in chunk of DMA memory:
1117 * 128-byte command parameter block (CPB)
1118 * one for each command tag
1121 pp->cpb_dma = mem_dma;
1123 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1124 writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1126 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1127 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1130 * Second item: block of ADMA_SGTBL_LEN s/g entries
1133 pp->aprd_dma = mem_dma;
1135 ap->private_data = pp;
1137 /* clear any outstanding interrupt conditions */
1138 writew(0xffff, mmio + NV_ADMA_STAT);
1140 /* initialize port variables */
1141 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1143 /* clear CPB fetch count */
1144 writew(0, mmio + NV_ADMA_CPB_COUNT);
1146 /* clear GO for register mode, enable interrupt */
1147 tmp = readw(mmio + NV_ADMA_CTL);
1148 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1149 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1151 tmp = readw(mmio + NV_ADMA_CTL);
1152 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1153 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1155 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1156 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1161 static void nv_adma_port_stop(struct ata_port *ap)
1163 struct nv_adma_port_priv *pp = ap->private_data;
1164 void __iomem *mmio = pp->ctl_block;
1167 writew(0, mmio + NV_ADMA_CTL);
1171 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1173 struct nv_adma_port_priv *pp = ap->private_data;
1174 void __iomem *mmio = pp->ctl_block;
1176 /* Go to register mode - clears GO */
1177 nv_adma_register_mode(ap);
1179 /* clear CPB fetch count */
1180 writew(0, mmio + NV_ADMA_CPB_COUNT);
1182 /* disable interrupt, shut down port */
1183 writew(0, mmio + NV_ADMA_CTL);
1188 static int nv_adma_port_resume(struct ata_port *ap)
1190 struct nv_adma_port_priv *pp = ap->private_data;
1191 void __iomem *mmio = pp->ctl_block;
1194 /* set CPB block location */
1195 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1196 writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1198 /* clear any outstanding interrupt conditions */
1199 writew(0xffff, mmio + NV_ADMA_STAT);
1201 /* initialize port variables */
1202 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1204 /* clear CPB fetch count */
1205 writew(0, mmio + NV_ADMA_CPB_COUNT);
1207 /* clear GO for register mode, enable interrupt */
1208 tmp = readw(mmio + NV_ADMA_CTL);
1209 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1210 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1212 tmp = readw(mmio + NV_ADMA_CTL);
1213 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1214 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1216 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1217 readw(mmio + NV_ADMA_CTL); /* flush posted write */
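/* Point the SFF taskfile addresses into this port's ADMA register window,
 * where the shadow taskfile registers are exposed at 4-byte strides, so
 * register-mode accesses keep working.
 */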
1223 static void nv_adma_setup_port(struct ata_port *ap)
1225 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1226 struct ata_ioports *ioport = &ap->ioaddr;
1230 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1232 ioport->cmd_addr = mmio;
1233 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
1234 ioport->error_addr =
1235 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1236 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1237 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1238 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1239 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1240 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
1241 ioport->status_addr =
1242 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
1243 ioport->altstatus_addr =
1244 ioport->ctl_addr = mmio + 0x20;
1247 static int nv_adma_host_init(struct ata_host *host)
1249 struct pci_dev *pdev = to_pci_dev(host->dev);
1255 /* enable ADMA on the ports */
1256 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1257 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1258 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1259 NV_MCP_SATA_CFG_20_PORT1_EN |
1260 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1262 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1264 for (i = 0; i < host->n_ports; i++)
1265 nv_adma_setup_port(host->ports[i]);
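/* Build one ADMA PRD (APRD) entry from a scatterlist segment; the first
 * five entries live inside the CPB itself, the remainder go into this
 * tag's slot of the external APRD table referenced by next_aprd.
 */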
1270 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1271 struct scatterlist *sg,
1273 struct nv_adma_prd *aprd)
1276 if (qc->tf.flags & ATA_TFLAG_WRITE)
1277 flags |= NV_APRD_WRITE;
1278 if (idx == qc->n_elem - 1)
1279 flags |= NV_APRD_END;
1281 flags |= NV_APRD_CONT;
1283 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1284 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1285 aprd->flags = flags;
1286 aprd->packet_len = 0;
1289 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1291 struct nv_adma_port_priv *pp = qc->ap->private_data;
1292 struct nv_adma_prd *aprd;
1293 struct scatterlist *sg;
1298 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1299 aprd = (si < 5) ? &cpb->aprd[si] :
1300 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1301 nv_adma_fill_aprd(qc, sg, si, aprd);
1304 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1306 cpb->next_aprd = cpu_to_le64(0);
1309 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1311 struct nv_adma_port_priv *pp = qc->ap->private_data;
1313 /* ADMA engine can only be used for non-ATAPI DMA commands,
1314 or interrupt-driven no-data commands. */
1315 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1316 (qc->tf.flags & ATA_TFLAG_POLLING))
1319 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1320 (qc->tf.protocol == ATA_PROT_NODATA))
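/* Build the CPB for a command: taskfile words, control flags and the APRD
 * list; commands that must use register mode fall back to standard SFF
 * preparation instead.
 */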
1326 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1328 struct nv_adma_port_priv *pp = qc->ap->private_data;
1329 struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1330 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1333 if (nv_adma_use_reg_mode(qc)) {
1334 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1335 (qc->flags & ATA_QCFLAG_DMAMAP));
1336 nv_adma_register_mode(qc->ap);
1337 ata_sff_qc_prep(qc);
1341 cpb->resp_flags = NV_CPB_RESP_DONE;
1348 cpb->next_cpb_idx = 0;
1350 /* turn on NCQ flags for NCQ commands */
1351 if (qc->tf.protocol == ATA_PROT_NCQ)
1352 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1354 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1356 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1358 if (qc->flags & ATA_QCFLAG_DMAMAP) {
1359 nv_adma_fill_sg(qc, cpb);
1360 ctl_flags |= NV_CPB_CTL_APRD_VALID;
1362 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1364 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1365 until we are finished filling in all of the contents */
1367 cpb->ctl_flags = ctl_flags;
1369 cpb->resp_flags = 0;
1372 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1374 struct nv_adma_port_priv *pp = qc->ap->private_data;
1375 void __iomem *mmio = pp->ctl_block;
1376 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1380 /* We can't handle result taskfile with NCQ commands, since
1381 retrieving the taskfile switches us out of ADMA mode and would abort
1382 existing commands. */
1383 if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1384 (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1385 ata_dev_printk(qc->dev, KERN_ERR,
1386 "NCQ w/ RESULT_TF not allowed\n");
1387 return AC_ERR_SYSTEM;
1390 if (nv_adma_use_reg_mode(qc)) {
1391 /* use ATA register mode */
1392 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1393 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1394 (qc->flags & ATA_QCFLAG_DMAMAP));
1395 nv_adma_register_mode(qc->ap);
1396 return ata_sff_qc_issue(qc);
1398 nv_adma_mode(qc->ap);
1400 /* write append register, command tag in lower 8 bits
1401 and (number of cpbs to append -1) in top 8 bits */
1404 if (curr_ncq != pp->last_issue_ncq) {
1405 /* Seems to need some delay before switching between NCQ and
1406 non-NCQ commands, else we get command timeouts and such. */
1408 pp->last_issue_ncq = curr_ncq;
1411 writew(qc->tag, mmio + NV_ADMA_APPEND);
1413 DPRINTK("Issued tag %u\n", qc->tag);
1418 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1420 struct ata_host *host = dev_instance;
1422 unsigned int handled = 0;
1423 unsigned long flags;
1425 spin_lock_irqsave(&host->lock, flags);
1427 for (i = 0; i < host->n_ports; i++) {
1428 struct ata_port *ap;
1430 ap = host->ports[i];
1432 !(ap->flags & ATA_FLAG_DISABLED)) {
1433 struct ata_queued_cmd *qc;
1435 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1436 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1437 handled += ata_sff_host_intr(ap, qc);
1439 // No request pending? Clear interrupt status
1440 // anyway, in case there's one pending.
1441 ap->ops->sff_check_status(ap);
1446 spin_unlock_irqrestore(&host->lock, flags);
1448 return IRQ_RETVAL(handled);
1451 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1455 for (i = 0; i < host->n_ports; i++) {
1456 struct ata_port *ap = host->ports[i];
1458 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1459 handled += nv_host_intr(ap, irq_stat);
1461 irq_stat >>= NV_INT_PORT_SHIFT;
1464 return IRQ_RETVAL(handled);
1467 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1469 struct ata_host *host = dev_instance;
1473 spin_lock(&host->lock);
1474 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1475 ret = nv_do_interrupt(host, irq_stat);
1476 spin_unlock(&host->lock);
1481 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1483 struct ata_host *host = dev_instance;
1487 spin_lock(&host->lock);
1488 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1489 ret = nv_do_interrupt(host, irq_stat);
1490 spin_unlock(&host->lock);
1495 static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
1497 if (sc_reg > SCR_CONTROL)
1500 *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1504 static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
1506 if (sc_reg > SCR_CONTROL)
1509 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
1513 static void nv_nf2_freeze(struct ata_port *ap)
1515 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1516 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1519 mask = ioread8(scr_addr + NV_INT_ENABLE);
1520 mask &= ~(NV_INT_ALL << shift);
1521 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1524 static void nv_nf2_thaw(struct ata_port *ap)
1526 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1527 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1530 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1532 mask = ioread8(scr_addr + NV_INT_ENABLE);
1533 mask |= (NV_INT_MASK << shift);
1534 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1537 static void nv_ck804_freeze(struct ata_port *ap)
1539 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1540 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1543 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1544 mask &= ~(NV_INT_ALL << shift);
1545 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1548 static void nv_ck804_thaw(struct ata_port *ap)
1550 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1551 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1554 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1556 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1557 mask |= (NV_INT_MASK << shift);
1558 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1561 static void nv_mcp55_freeze(struct ata_port *ap)
1563 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1564 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1567 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1569 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1570 mask &= ~(NV_INT_ALL_MCP55 << shift);
1571 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1575 static void nv_mcp55_thaw(struct ata_port *ap)
1577 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1578 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1581 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1583 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1584 mask |= (NV_INT_MASK_MCP55 << shift);
1585 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1589 static void nv_adma_error_handler(struct ata_port *ap)
1591 struct nv_adma_port_priv *pp = ap->private_data;
1592 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1593 void __iomem *mmio = pp->ctl_block;
1597 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1598 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1599 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1600 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1601 u32 status = readw(mmio + NV_ADMA_STAT);
1602 u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1603 u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1605 ata_port_printk(ap, KERN_ERR,
1606 "EH in ADMA mode, notifier 0x%X "
1607 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1608 "next cpb count 0x%X next cpb idx 0x%x\n",
1609 notifier, notifier_error, gen_ctl, status,
1610 cpb_count, next_cpb_idx);
1612 for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1613 struct nv_adma_cpb *cpb = &pp->cpb[i];
1614 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1615 ap->link.sactive & (1 << i))
1616 ata_port_printk(ap, KERN_ERR,
1617 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1618 i, cpb->ctl_flags, cpb->resp_flags);
1622 /* Push us back into port register mode for error handling. */
1623 nv_adma_register_mode(ap);
1625 /* Mark all of the CPBs as invalid to prevent them from
1627 for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1628 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1630 /* clear CPB fetch count */
1631 writew(0, mmio + NV_ADMA_CPB_COUNT);
1634 tmp = readw(mmio + NV_ADMA_CTL);
1635 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1636 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1638 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1639 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1642 ata_sff_error_handler(ap);
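/* Simple FIFO of deferred NCQ commands: tags are pushed at the tail and
 * popped from the head, with defer_bits tracking which tags are queued.
 */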
1645 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1647 struct nv_swncq_port_priv *pp = ap->private_data;
1648 struct defer_queue *dq = &pp->defer_queue;
1651 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1652 dq->defer_bits |= (1 << qc->tag);
1653 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1656 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1658 struct nv_swncq_port_priv *pp = ap->private_data;
1659 struct defer_queue *dq = &pp->defer_queue;
1662 if (dq->head == dq->tail) /* null queue */
1665 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1666 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1667 WARN_ON(!(dq->defer_bits & (1 << tag)));
1668 dq->defer_bits &= ~(1 << tag);
1670 return ata_qc_from_tag(ap, tag);
1673 static void nv_swncq_fis_reinit(struct ata_port *ap)
1675 struct nv_swncq_port_priv *pp = ap->private_data;
1678 pp->dmafis_bits = 0;
1679 pp->sdbfis_bits = 0;
1683 static void nv_swncq_pp_reinit(struct ata_port *ap)
1685 struct nv_swncq_port_priv *pp = ap->private_data;
1686 struct defer_queue *dq = &pp->defer_queue;
1692 pp->last_issue_tag = ATA_TAG_POISON;
1693 nv_swncq_fis_reinit(ap);
1696 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1698 struct nv_swncq_port_priv *pp = ap->private_data;
1700 writew(fis, pp->irq_block);
1703 static void __ata_bmdma_stop(struct ata_port *ap)
1705 struct ata_queued_cmd qc;
1708 ata_bmdma_stop(&qc);
1711 static void nv_swncq_ncq_stop(struct ata_port *ap)
1713 struct nv_swncq_port_priv *pp = ap->private_data;
1718 ata_port_printk(ap, KERN_ERR,
1719 "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1720 ap->qc_active, ap->link.sactive);
1721 ata_port_printk(ap, KERN_ERR,
1722 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1723 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1724 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1725 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1727 ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1728 ap->ops->sff_check_status(ap),
1729 ioread8(ap->ioaddr.error_addr));
1731 sactive = readl(pp->sactive_block);
1732 done_mask = pp->qc_active ^ sactive;
1734 ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1735 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1737 if (pp->qc_active & (1 << i))
1739 else if (done_mask & (1 << i))
1744 ata_port_printk(ap, KERN_ERR,
1745 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1746 (pp->dhfis_bits >> i) & 0x1,
1747 (pp->dmafis_bits >> i) & 0x1,
1748 (pp->sdbfis_bits >> i) & 0x1,
1749 (sactive >> i) & 0x1,
1750 (err ? "error! tag doesn't exist" : " "));
1753 nv_swncq_pp_reinit(ap);
1754 ap->ops->sff_irq_clear(ap);
1755 __ata_bmdma_stop(ap);
1756 nv_swncq_irq_clear(ap, 0xffff);
1759 static void nv_swncq_error_handler(struct ata_port *ap)
1761 struct ata_eh_context *ehc = &ap->link.eh_context;
1763 if (ap->link.sactive) {
1764 nv_swncq_ncq_stop(ap);
1765 ehc->i.action |= ATA_EH_RESET;
1768 ata_sff_error_handler(ap);
1772 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1774 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1778 writel(~0, mmio + NV_INT_STATUS_MCP55);
1781 writel(0, mmio + NV_INT_ENABLE_MCP55);
1784 tmp = readl(mmio + NV_CTL_MCP55);
1785 tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1786 writel(tmp, mmio + NV_CTL_MCP55);
1791 static int nv_swncq_port_resume(struct ata_port *ap)
1793 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1797 writel(~0, mmio + NV_INT_STATUS_MCP55);
1800 writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1803 tmp = readl(mmio + NV_CTL_MCP55);
1804 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1810 static void nv_swncq_host_init(struct ata_host *host)
1813 void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1814 struct pci_dev *pdev = to_pci_dev(host->dev);
1817 /* disable ECO 398 */
1818 pci_read_config_byte(pdev, 0x7f, &regval);
1819 regval &= ~(1 << 7);
1820 pci_write_config_byte(pdev, 0x7f, regval);
1823 tmp = readl(mmio + NV_CTL_MCP55);
1824 VPRINTK("HOST_CTL:0x%X\n", tmp);
1825 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1827 /* enable irq intr */
1828 tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1829 VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1830 writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1832 /* clear port irq */
1833 writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1836 static int nv_swncq_slave_config(struct scsi_device *sdev)
1838 struct ata_port *ap = ata_shost_to_port(sdev->host);
1839 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1840 struct ata_device *dev;
1843 u8 check_maxtor = 0;
1844 unsigned char model_num[ATA_ID_PROD_LEN + 1];
1846 rc = ata_scsi_slave_config(sdev);
1847 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1848 /* Not a proper libata device, ignore */
1851 dev = &ap->link.device[sdev->id];
1852 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1855 /* if MCP51 and Maxtor, then disable ncq */
1856 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1857 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1860 /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1861 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1862 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1863 pci_read_config_byte(pdev, 0x8, &rev);
1871 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1873 if (strncmp(model_num, "Maxtor", 6) == 0) {
1874 ata_scsi_change_queue_depth(sdev, 1);
1875 ata_dev_printk(dev, KERN_NOTICE,
1876 "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1882 static int nv_swncq_port_start(struct ata_port *ap)
1884 struct device *dev = ap->host->dev;
1885 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1886 struct nv_swncq_port_priv *pp;
1889 rc = ata_port_start(ap);
1893 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1897 pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1898 &pp->prd_dma, GFP_KERNEL);
1901 memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1903 ap->private_data = pp;
1904 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1905 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1906 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1911 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1913 if (qc->tf.protocol != ATA_PROT_NCQ) {
1914 ata_sff_qc_prep(qc);
1918 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1921 nv_swncq_fill_sg(qc);
1924 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1926 struct ata_port *ap = qc->ap;
1927 struct scatterlist *sg;
1928 struct nv_swncq_port_priv *pp = ap->private_data;
1929 struct ata_prd *prd;
1930 unsigned int si, idx;
1932 prd = pp->prd + ATA_MAX_PRD * qc->tag;
1935 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1939 addr = (u32)sg_dma_address(sg);
1940 sg_len = sg_dma_len(sg);
1943 offset = addr & 0xffff;
1945 if ((offset + sg_len) > 0x10000)
1946 len = 0x10000 - offset;
1948 prd[idx].addr = cpu_to_le32(addr);
1949 prd[idx].flags_len = cpu_to_le32(len & 0xffff);
1957 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
1960 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
1961 struct ata_queued_cmd *qc)
1963 struct nv_swncq_port_priv *pp = ap->private_data;
1970 writel((1 << qc->tag), pp->sactive_block);
1971 pp->last_issue_tag = qc->tag;
1972 pp->dhfis_bits &= ~(1 << qc->tag);
1973 pp->dmafis_bits &= ~(1 << qc->tag);
1974 pp->qc_active |= (0x1 << qc->tag);
1976 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
1977 ap->ops->sff_exec_command(ap, &qc->tf);
1979 DPRINTK("Issued tag %u\n", qc->tag);
1984 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
1986 struct ata_port *ap = qc->ap;
1987 struct nv_swncq_port_priv *pp = ap->private_data;
1989 if (qc->tf.protocol != ATA_PROT_NCQ)
1990 return ata_sff_qc_issue(qc);
1995 nv_swncq_issue_atacmd(ap, qc);
1997 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2002 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2005 struct ata_eh_info *ehi = &ap->link.eh_info;
2007 ata_ehi_clear_desc(ehi);
2009 /* AHCI needs SError cleared; otherwise, it might lock up */
2010 sata_scr_read(&ap->link, SCR_ERROR, &serror);
2011 sata_scr_write(&ap->link, SCR_ERROR, serror);
2013 /* analyze @irq_stat */
2014 if (fis & NV_SWNCQ_IRQ_ADDED)
2015 ata_ehi_push_desc(ehi, "hot plug");
2016 else if (fis & NV_SWNCQ_IRQ_REMOVED)
2017 ata_ehi_push_desc(ehi, "hot unplug");
2019 ata_ehi_hotplugged(ehi);
2021 /* okay, let's hand over to EH */
2022 ehi->serror |= serror;
2024 ata_port_freeze(ap);
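/* A Set Device Bits FIS arrived: stop BMDMA, complete every tag that has
 * dropped out of SActive, and reissue or issue from the defer queue as the
 * controller state requires.
 */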
2027 static int nv_swncq_sdbfis(struct ata_port *ap)
2029 struct ata_queued_cmd *qc;
2030 struct nv_swncq_port_priv *pp = ap->private_data;
2031 struct ata_eh_info *ehi = &ap->link.eh_info;
2039 host_stat = ap->ops->bmdma_status(ap);
2040 if (unlikely(host_stat & ATA_DMA_ERR)) {
2041 /* error when transferring data to/from memory */
2042 ata_ehi_clear_desc(ehi);
2043 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2044 ehi->err_mask |= AC_ERR_HOST_BUS;
2045 ehi->action |= ATA_EH_RESET;
2049 ap->ops->sff_irq_clear(ap);
2050 __ata_bmdma_stop(ap);
2052 sactive = readl(pp->sactive_block);
2053 done_mask = pp->qc_active ^ sactive;
2055 if (unlikely(done_mask & sactive)) {
2056 ata_ehi_clear_desc(ehi);
2057 ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
2058 "(%08x->%08x)", pp->qc_active, sactive);
2059 ehi->err_mask |= AC_ERR_HSM;
2060 ehi->action |= ATA_EH_RESET;
2063 for (i = 0; i < ATA_MAX_QUEUE; i++) {
2064 if (!(done_mask & (1 << i)))
2067 qc = ata_qc_from_tag(ap, i);
2069 ata_qc_complete(qc);
2070 pp->qc_active &= ~(1 << i);
2071 pp->dhfis_bits &= ~(1 << i);
2072 pp->dmafis_bits &= ~(1 << i);
2073 pp->sdbfis_bits |= (1 << i);
2078 if (!ap->qc_active) {
2080 nv_swncq_pp_reinit(ap);
2084 if (pp->qc_active & pp->dhfis_bits)
2087 if ((pp->ncq_flags & ncq_saw_backout) ||
2088 (pp->qc_active ^ pp->dhfis_bits))
2089 /* if the controller can't get a device to host register FIS,
2090 * the driver needs to reissue the command.
2094 DPRINTK("id 0x%x QC: qc_active 0x%x,"
2095 "SWNCQ:qc_active 0x%X defer_bits %X "
2096 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2097 ap->print_id, ap->qc_active, pp->qc_active,
2098 pp->defer_queue.defer_bits, pp->dhfis_bits,
2099 pp->dmafis_bits, pp->last_issue_tag);
2101 nv_swncq_fis_reinit(ap);
2104 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2105 nv_swncq_issue_atacmd(ap, qc);
2109 if (pp->defer_queue.defer_bits) {
2110 /* send deferral queue command */
2111 qc = nv_swncq_qc_from_dq(ap);
2112 WARN_ON(qc == NULL);
2113 nv_swncq_issue_atacmd(ap, qc);
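/* Read back the tag reported by the controller (from the DMA Setup FIS)
 * via the MCP55 NCQ register block.
 */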
2119 static inline u32 nv_swncq_tag(struct ata_port *ap)
2121 struct nv_swncq_port_priv *pp = ap->private_data;
2124 tag = readb(pp->tag_block) >> 2;
2125 return (tag & 0x1f);
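/* A DMA Setup FIS arrived: look up the tag the controller wants serviced,
 * load that tag's PRD table into the BMDMA engine and start the transfer.
 */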
2128 static int nv_swncq_dmafis(struct ata_port *ap)
2130 struct ata_queued_cmd *qc;
2134 struct nv_swncq_port_priv *pp = ap->private_data;
2136 __ata_bmdma_stop(ap);
2137 tag = nv_swncq_tag(ap);
2139 DPRINTK("dma setup tag 0x%x\n", tag);
2140 qc = ata_qc_from_tag(ap, tag);
2145 rw = qc->tf.flags & ATA_TFLAG_WRITE;
2147 /* load PRD table addr. */
2148 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2149 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2151 /* specify data direction, triple-check start bit is clear */
2152 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2153 dmactl &= ~ATA_DMA_WR;
2155 dmactl |= ATA_DMA_WR;
2157 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
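/* Per-port SWNCQ interrupt work: clear the FIS status bits, track which
 * FIS types have been seen for the outstanding tags, and drive the SDB /
 * DMA-setup handling; freeze the port on anything inconsistent.
 */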
2162 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2164 struct nv_swncq_port_priv *pp = ap->private_data;
2165 struct ata_queued_cmd *qc;
2166 struct ata_eh_info *ehi = &ap->link.eh_info;
2171 ata_stat = ap->ops->sff_check_status(ap);
2172 nv_swncq_irq_clear(ap, fis);
2176 if (ap->pflags & ATA_PFLAG_FROZEN)
2179 if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2180 nv_swncq_hotplug(ap, fis);
2187 if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
2189 ap->ops->scr_write(ap, SCR_ERROR, serror);
2191 if (ata_stat & ATA_ERR) {
2192 ata_ehi_clear_desc(ehi);
2193 ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2194 ehi->err_mask |= AC_ERR_DEV;
2195 ehi->serror |= serror;
2196 ehi->action |= ATA_EH_RESET;
2197 ata_port_freeze(ap);
2201 if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2202 /* On a backout interrupt, the driver must issue
2203 * the command again some time later.
2205 pp->ncq_flags |= ncq_saw_backout;
2208 if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2209 pp->ncq_flags |= ncq_saw_sdb;
2210 DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2211 "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2212 ap->print_id, pp->qc_active, pp->dhfis_bits,
2213 pp->dmafis_bits, readl(pp->sactive_block));
2214 rc = nv_swncq_sdbfis(ap);
2219 if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2220 /* The interrupt indicates the new command
2221 * was transmitted correctly to the drive.
2223 pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2224 pp->ncq_flags |= ncq_saw_d2h;
2225 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2226 ata_ehi_push_desc(ehi, "illegal FIS transaction");
2227 ehi->err_mask |= AC_ERR_HSM;
2228 ehi->action |= ATA_EH_RESET;
2232 if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2233 !(pp->ncq_flags & ncq_saw_dmas)) {
2234 ata_stat = ap->ops->sff_check_status(ap);
2235 if (ata_stat & ATA_BUSY)
2238 if (pp->defer_queue.defer_bits) {
2239 DPRINTK("send next command\n");
2240 qc = nv_swncq_qc_from_dq(ap);
2241 nv_swncq_issue_atacmd(ap, qc);
2246 if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2247 /* program the DMA controller with the appropriate PRD buffers
2248 * and start the DMA transfer for the requested command.
2250 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2251 pp->ncq_flags |= ncq_saw_dmas;
2252 rc = nv_swncq_dmafis(ap);
2258 ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2259 ata_port_freeze(ap);
2263 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2265 struct ata_host *host = dev_instance;
2267 unsigned int handled = 0;
2268 unsigned long flags;
2271 spin_lock_irqsave(&host->lock, flags);
2273 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
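/*
 * The MCP55 status register packs the interrupt bits for both ports into
 * a single 32-bit value; each pass through the loop below handles the
 * low bits for one port and then shifts irq_stat right by
 * NV_INT_PORT_SHIFT_MCP55 to expose the next port's bits.
 */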
2275 for (i = 0; i < host->n_ports; i++) {
2276 struct ata_port *ap = host->ports[i];
2278 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2279 if (ap->link.sactive) {
2280 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2283 if (irq_stat) /* preserve hotplug bits */
2284 nv_swncq_irq_clear(ap, 0xfff0);
2286 handled += nv_host_intr(ap, (u8)irq_stat);
2289 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2292 spin_unlock_irqrestore(&host->lock, flags);
2294 return IRQ_RETVAL(handled);
2297 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2299 static int printed_version;
2300 const struct ata_port_info *ppi[] = { NULL, NULL };
2301 struct nv_pi_priv *ipriv;
2302 struct ata_host *host;
2303 struct nv_host_priv *hpriv;
2307 unsigned long type = ent->driver_data;
2309 // Make sure this is a SATA controller by counting the number of BARs
2310 // (NVIDIA SATA controllers will always have six BARs). Otherwise,
2311 // it's an IDE controller and we ignore it.
2312 for (bar = 0; bar < 6; bar++)
2313 if (pci_resource_start(pdev, bar) == 0)
2316 if (!printed_version++)
2317 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2319 rc = pcim_enable_device(pdev);
2323 /* determine type and allocate host */
2324 if (type == CK804 && adma_enabled) {
2325 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2329 if (type == SWNCQ) {
2331 dev_printk(KERN_NOTICE, &pdev->dev,
2332 "Using SWNCQ mode\n");
2337 ppi[0] = &nv_port_info[type];
2338 ipriv = ppi[0]->private_data;
2339 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2343 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2347 host->private_data = hpriv;
2349 /* request and iomap NV_MMIO_BAR */
2350 rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2354 /* configure SCR access */
2355 base = host->iomap[NV_MMIO_BAR];
2356 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2357 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2359 /* enable SATA space for CK804 */
2360 if (type >= CK804) {
2363 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2364 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2365 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2370 rc = nv_adma_host_init(host);
2373 } else if (type == SWNCQ)
2374 nv_swncq_host_init(host);
2376 pci_set_master(pdev);
2377 return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
2378 IRQF_SHARED, ipriv->sht);
2382 static int nv_pci_device_resume(struct pci_dev *pdev)
2384 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2385 struct nv_host_priv *hpriv = host->private_data;
2388 rc = ata_pci_device_do_resume(pdev);
2392 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2393 if (hpriv->type >= CK804) {
2396 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2397 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2398 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2400 if (hpriv->type == ADMA) {
2402 struct nv_adma_port_priv *pp;
2403 /* enable/disable ADMA on the ports appropriately */
2404 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2406 pp = host->ports[0]->private_data;
2407 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2408 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2409 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2411 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
2412 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2413 pp = host->ports[1]->private_data;
2414 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2415 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2416 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2418 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
2419 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2421 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2425 ata_host_resume(host);
2431 static void nv_ck804_host_stop(struct ata_host *host)
2433 struct pci_dev *pdev = to_pci_dev(host->dev);
2436 /* disable SATA space for CK804 */
2437 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2438 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2439 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2442 static void nv_adma_host_stop(struct ata_host *host)
2444 struct pci_dev *pdev = to_pci_dev(host->dev);
2447 /* disable ADMA on the ports */
2448 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2449 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2450 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2451 NV_MCP_SATA_CFG_20_PORT1_EN |
2452 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2454 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2456 nv_ck804_host_stop(host);
2459 static int __init nv_init(void)
2461 return pci_register_driver(&nv_pci_driver);
2464 static void __exit nv_exit(void)
2466 pci_unregister_driver(&nv_pci_driver);
2469 module_init(nv_init);
2470 module_exit(nv_exit);
2471 module_param_named(adma, adma_enabled, bool, 0444);
2472 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
2473 module_param_named(swncq, swncq_enabled, bool, 0444);
2474 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
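/*
 * Example (hypothetical invocation): loading the driver with
 * "modprobe sata_nv adma=0 swncq=0" would presumably leave CK804/MCP55
 * controllers on the plain BMDMA interface, since the ADMA and SWNCQ
 * paths in nv_init_one() are only taken when these parameters are
 * enabled.
 */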