/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ.  Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.3"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,
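	/* Worked example of the 4-bits-per-port packing (derived from the
	   constants above): the status byte holds port 0's DEV/PM/ADDED/
	   REMOVED bits in its low nibble and port 1's in its high nibble,
	   which is why nv_do_interrupt() below shifts irq_stat right by
	   NV_INT_PORT_SHIFT after servicing each port. */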
	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01,	// 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
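	/* Worked arithmetic for the sizes above (a sketch, using
	   NV_ADMA_CPB_SZ = 128 and NV_ADMA_APRD_SZ = 16): each command tag
	   owns a 128-byte CPB plus an external s/g table of
	   (1024 - 128) / 16 = 56 APRDs (896 bytes).  With the 5 APRDs
	   embedded in the CPB itself, that gives NV_ADMA_SGTBL_TOTAL_LEN =
	   56 + 5 = 61 segments per command, and NV_ADMA_PORT_PRIV_DMA_SZ =
	   32 * (128 + 896) = 32768 bytes of DMA memory per port. */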
	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};
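/* Size check (derived from the offsets noted above): tf[12] is 24 bytes,
   i.e. three of the 64-bit words that the len field counts, which is why
   nv_adma_qc_prep() below sets cpb->len = 3.  The trailing reserved3 field
   pads the whole CPB to 128 bytes, matching NV_ADMA_CPB_SZ. */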
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u8			flags;
};

struct nv_host_priv {
	unsigned long		type;
};

#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
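/* Example: for PORT 0 the macro tests bit 19 of the ADMA general control/
   status word read from gen_block + NV_ADMA_GEN_CTL; for PORT 1 it tests
   bit 19 + 12 = 31.  The bit positions come from the macro itself; the
   rest of that register's layout is not documented here. */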
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_remove_one(struct pci_dev *pdev);
static int nv_pci_device_resume(struct pci_dev *pdev);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read(struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
static u8 nv_adma_bmdma_status(struct ata_port *ap);
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ }	/* terminate list */
};
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
	.remove			= nv_remove_one,
};
static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};
static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};
static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_generic_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};
static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_nf2_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};
static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_ck804_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};
static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= nv_adma_bmdma_setup,
	.bmdma_start		= nv_adma_bmdma_start,
	.bmdma_stop		= nv_adma_bmdma_stop,
	.bmdma_status		= nv_adma_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_bmdma_stop,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_adma_interrupt,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
	.host_stop		= nv_adma_host_stop,
};
static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
	},
	/* CK804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
	},
};
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}
static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	       !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
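/* Mode-switch handshake, summarized from the two helpers above: clearing
   NV_ADMA_CTL_GO and waiting for NV_ADMA_STAT_LEGACY enters register mode;
   setting GO and waiting for LEGACY to clear while IDLE is set enters ADMA
   mode.  Each helper polls at most 20 times with 50 ns delays, so a wedged
   engine produces a warning rather than an indefinite busy-wait. */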
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}
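/* Example of the two configurations chosen above (a sketch): an ATAPI
   device on port 0 clears NV_MCP_SATA_CFG_20_PORT0_EN/_PWB_EN and limits
   the queue to LIBATA_MAX_PRD - 1 segments with a 32-bit bounce limit,
   while a disk restores those enable bits and allows
   NV_ADMA_SGTBL_TOTAL_LEN segments up to the device's full DMA mask. */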
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);

	if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
	} else {
		cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature);
		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
	}
	cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature);
	cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->lbal);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->lbam);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->lbah);
	cpb[idx++] = cpu_to_le16((ATA_REG_CMD   << 8) | tf->command | CMDEND);

	return idx;
}
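/* Each __le16 written above packs a shadow-register address in the high
   byte and the value in the low byte, plus control bits.  An assumed
   (illustrative, not from the original source) example: for READ DMA,
   command 0xC8, the final entry is cpb[11] = (ATA_REG_CMD << 8) | 0xC8 |
   CMDEND, so the engine writes 0xC8 to the Command register and then
   stops, CMDEND marking the end of the register list. */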
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, ": ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, ": CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, ": CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (flags & NV_CPB_RESP_DONE) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			/* Grab the ATA port status for non-NCQ commands.
			   For NCQ commands the current status may have nothing to do with
			   the command just completed. */
			if (qc->tf.protocol != ATA_PROT_NCQ) {
				u8 ata_status = readb(pp->ctl_block + (ATA_REG_STATUS * 4));
				qc->err_mask |= ac_err_mask(ata_status);
			}
			DPRINTK("Completing qc from tag %d with err_mask %u\n", cpb_num,
				qc->err_mask);
			ata_qc_complete(qc);
		}
	}
	return 0;
}
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if (ata_tag_valid(ap->active_tag))
					/* NV_INT_DEV indication seems unreliable at times
					   at least in ADMA mode. Force it on always when a
					   command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT);	/* flush posted write */
			rmb();

			handled++;	/* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->eh_info;

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, ": timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, ": SError");
				}
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR)) {
				/* Check CPBs for completed commands */
				if (ata_tag_valid(ap->active_tag)) {
					/* Non-NCQ command */
					nv_adma_check_cpb(ap, ap->active_tag,
						notifier_error & (1 << ap->active_tag));
				} else {
					int pos, error = 0;
					u32 active = ap->sactive;

					while ((pos = ffs(active)) && !error) {
						pos--;
						error = nv_adma_check_cpb(ap, pos,
							notifier_error & (1 << pos));
						active &= ~(1 << pos);
					}
				}
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       pp->notifier_clear_block);

	/* clear legacy status */
	iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
}
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* load PRD table addr. */
	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ata_exec_command(ap, &qc->tf);
}
static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* start host DMA transaction */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START,
		 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	/* clear start/stop bit */
	iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
		 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);	/* dummy read */
}
static u8 nv_adma_bmdma_status(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;

	WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));

	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(5);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
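/* Resulting per-port DMA layout (from the arithmetic above): 32 CPBs of
   128 bytes (4 KB) followed by 32 external s/g tables of 896 bytes each
   (28 KB), NV_ADMA_PORT_PRIV_DMA_SZ = 32 KB total, with pp->cpb_dma and
   pp->aprd_dma recording the two bus-address bases. */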
static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}
static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(5);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
	void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &probe_ent->port[port];

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}
static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	return 0;
}
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags;

	memset(aprd, 0, sizeof(struct nv_adma_prd));

	flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len = cpu_to_le32(((u32)sg_dma_len(sg)));	/* len in bytes */
	aprd->flags = flags;
}
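/* Flag layout example (a sketch of the logic above): for a 7-element s/g
   list, entries 0-3 get NV_APRD_CONT, entry 4 (the last APRD inside the
   CPB) gets no CONT flag because the engine follows next_aprd to the
   external table instead, entry 5 gets NV_APRD_CONT again, and entry 6
   gets NV_APRD_END. */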
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx - 5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
}
static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	memset(cpb, 0, sizeof(struct nv_adma_cpb));

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
}
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();
	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
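/* Append-register sketch, following the format described above: issuing
   tag 5 as a single CPB writes 0x0005 -- (count - 1) = 0 in the high byte
   and the tag in the low byte.  This driver only ever appends one CPB per
   write, so the high byte is always zero here. */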
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
static u32 nv_scr_read(struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
}
static void nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}
static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}
static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(5);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_port_info *ppi[2];
	struct ata_probe_ent *probe_ent;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;
	int mask_set = 0;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pcim_pin_device(pdev);
		return rc;
	}

	if (type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
		if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
		    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			mask_set = 1;
	}

	if (!mask_set) {
		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
	}

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	ppi[0] = ppi[1] = &nv_port_info[type];
	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		return -ENOMEM;

	if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))
		return -EIO;
	probe_ent->iomap = pcim_iomap_table(pdev);

	probe_ent->private_data = hpriv;
	hpriv->type = type;

	base = probe_ent->iomap[NV_MMIO_BAR];
	probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	pci_set_master(pdev);

	if (type == ADMA) {
		rc = nv_adma_host_init(probe_ent);
		if (rc)
			return rc;
	}

	rc = ata_device_add(probe_ent);
	if (rc != NV_PORTS)
		return -ENODEV;

	devm_kfree(&pdev->dev, probe_ent);
	return 0;
}
static void nv_remove_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;

	ata_pci_remove_one(pdev);
	kfree(hpriv);
}
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}
static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}
static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");