/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a fashion
 *  similar to other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME                        "sata_nv"
#define DRV_VERSION                     "3.5"

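/* ADMA scatter/gather segments may span anything up to a 4 GiB boundary;
   ATAPI commands go through the legacy engine and are instead restricted
   to ATA_DMA_BOUNDARY in nv_adma_slave_config(). */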
#define NV_ADMA_DMA_BOUNDARY            0xffffffffUL

enum {
        NV_MMIO_BAR                     = 5,

        NV_PORTS                        = 2,
        NV_PIO_MASK                     = 0x1f,
        NV_MWDMA_MASK                   = 0x07,
        NV_UDMA_MASK                    = 0x7f,
        NV_PORT0_SCR_REG_OFFSET         = 0x00,
        NV_PORT1_SCR_REG_OFFSET         = 0x40,

        /* INT_STATUS/ENABLE */
        NV_INT_STATUS                   = 0x10,
        NV_INT_ENABLE                   = 0x11,
        NV_INT_STATUS_CK804             = 0x440,
        NV_INT_ENABLE_CK804             = 0x441,

        /* INT_STATUS/ENABLE bits */
        NV_INT_DEV                      = 0x01,
        NV_INT_PM                       = 0x02,
        NV_INT_ADDED                    = 0x04,
        NV_INT_REMOVED                  = 0x08,

        NV_INT_PORT_SHIFT               = 4,    /* each port occupies 4 bits */

        NV_INT_ALL                      = 0x0f,
        NV_INT_MASK                     = NV_INT_DEV |
                                          NV_INT_ADDED | NV_INT_REMOVED,

        /* INT_CONFIG */
        NV_INT_CONFIG                   = 0x12,
        NV_INT_CONFIG_METHD             = 0x01, // 0 = INT, 1 = SMI

        // For PCI config register 20
        NV_MCP_SATA_CFG_20              = 0x50,
        NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
        NV_MCP_SATA_CFG_20_PORT0_EN     = (1 << 17),
        NV_MCP_SATA_CFG_20_PORT1_EN     = (1 << 16),
        NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
        NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

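        /* Per-tag DMA layout: a 128-byte CPB followed by
           (1024 - 128) / 16 = 56 external APRDs, i.e. 1 KiB per tag.
           Together with the 5 APRDs embedded in the CPB itself this
           allows up to 61 SG entries per command; 32 tags give 32 KiB
           of DMA memory per port. */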
        NV_ADMA_MAX_CPBS                = 32,
        NV_ADMA_CPB_SZ                  = 128,
        NV_ADMA_APRD_SZ                 = 16,
        NV_ADMA_SGTBL_LEN               = (1024 - NV_ADMA_CPB_SZ) /
                                           NV_ADMA_APRD_SZ,
        NV_ADMA_SGTBL_TOTAL_LEN         = NV_ADMA_SGTBL_LEN + 5,
        NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
        NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
                                           (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

        /* BAR5 offset to ADMA general registers */
        NV_ADMA_GEN                     = 0x400,
        NV_ADMA_GEN_CTL                 = 0x00,
        NV_ADMA_NOTIFIER_CLEAR          = 0x30,

        /* BAR5 offset to ADMA ports */
        NV_ADMA_PORT                    = 0x480,

        /* size of ADMA port register space */
        NV_ADMA_PORT_SIZE               = 0x100,

        /* ADMA port registers */
        NV_ADMA_CTL                     = 0x40,
        NV_ADMA_CPB_COUNT               = 0x42,
        NV_ADMA_NEXT_CPB_IDX            = 0x43,
        NV_ADMA_STAT                    = 0x44,
        NV_ADMA_CPB_BASE_LOW            = 0x48,
        NV_ADMA_CPB_BASE_HIGH           = 0x4C,
        NV_ADMA_APPEND                  = 0x50,
        NV_ADMA_NOTIFIER                = 0x68,
        NV_ADMA_NOTIFIER_ERROR          = 0x6C,

        /* NV_ADMA_CTL register bits */
        NV_ADMA_CTL_HOTPLUG_IEN         = (1 << 0),
        NV_ADMA_CTL_CHANNEL_RESET       = (1 << 5),
        NV_ADMA_CTL_GO                  = (1 << 7),
        NV_ADMA_CTL_AIEN                = (1 << 8),
        NV_ADMA_CTL_READ_NON_COHERENT   = (1 << 11),
        NV_ADMA_CTL_WRITE_NON_COHERENT  = (1 << 12),

        /* CPB response flag bits */
        NV_CPB_RESP_DONE                = (1 << 0),
        NV_CPB_RESP_ATA_ERR             = (1 << 3),
        NV_CPB_RESP_CMD_ERR             = (1 << 4),
        NV_CPB_RESP_CPB_ERR             = (1 << 7),

        /* CPB control flag bits */
        NV_CPB_CTL_CPB_VALID            = (1 << 0),
        NV_CPB_CTL_QUEUE                = (1 << 1),
        NV_CPB_CTL_APRD_VALID           = (1 << 2),
        NV_CPB_CTL_IEN                  = (1 << 3),
        NV_CPB_CTL_FPDMA                = (1 << 4),

        /* APRD flags */
        NV_APRD_WRITE                   = (1 << 1),
        NV_APRD_END                     = (1 << 2),
        NV_APRD_CONT                    = (1 << 3),

        /* NV_ADMA_STAT flags */
        NV_ADMA_STAT_TIMEOUT            = (1 << 0),
        NV_ADMA_STAT_HOTUNPLUG          = (1 << 1),
        NV_ADMA_STAT_HOTPLUG            = (1 << 2),
        NV_ADMA_STAT_CPBERR             = (1 << 4),
        NV_ADMA_STAT_SERROR             = (1 << 5),
        NV_ADMA_STAT_CMD_COMPLETE       = (1 << 6),
        NV_ADMA_STAT_IDLE               = (1 << 8),
        NV_ADMA_STAT_LEGACY             = (1 << 9),
        NV_ADMA_STAT_STOPPED            = (1 << 10),
        NV_ADMA_STAT_DONE               = (1 << 12),
        NV_ADMA_STAT_ERR                = NV_ADMA_STAT_CPBERR |
                                          NV_ADMA_STAT_TIMEOUT,

        /* port flags */
        NV_ADMA_PORT_REGISTER_MODE      = (1 << 0),
        NV_ADMA_ATAPI_SETUP_COMPLETE    = (1 << 1),

        /* MCP55 reg offset */
        NV_CTL_MCP55                    = 0x400,
        NV_INT_STATUS_MCP55             = 0x440,
        NV_INT_ENABLE_MCP55             = 0x444,
        NV_NCQ_REG_MCP55                = 0x448,

        /* MCP55 */
        NV_INT_ALL_MCP55                = 0xffff,
        NV_INT_PORT_SHIFT_MCP55         = 16,   /* each port occupies 16 bits */
        NV_INT_MASK_MCP55               = NV_INT_ALL_MCP55 & 0xfffd,

        /* SWNCQ enable bits */
        NV_CTL_PRI_SWNCQ                = 0x02,
        NV_CTL_SEC_SWNCQ                = 0x04,

        /* SWNCQ status bits */
        NV_SWNCQ_IRQ_DEV                = (1 << 0),
        NV_SWNCQ_IRQ_PM                 = (1 << 1),
        NV_SWNCQ_IRQ_ADDED              = (1 << 2),
        NV_SWNCQ_IRQ_REMOVED            = (1 << 3),

        NV_SWNCQ_IRQ_BACKOUT            = (1 << 4),
        NV_SWNCQ_IRQ_SDBFIS             = (1 << 5),
        NV_SWNCQ_IRQ_DHREGFIS           = (1 << 6),
        NV_SWNCQ_IRQ_DMASETUP           = (1 << 7),

        NV_SWNCQ_IRQ_HOTPLUG            = NV_SWNCQ_IRQ_ADDED |
                                          NV_SWNCQ_IRQ_REMOVED,
};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
        __le64                  addr;
        __le32                  len;
        u8                      flags;
        u8                      packet_len;
        __le16                  reserved;
};

enum nv_adma_regbits {
        CMDEND  = (1 << 15),            /* end of command list */
        WNB     = (1 << 14),            /* wait-not-BSY */
        IGN     = (1 << 13),            /* ignore this entry */
        CS1n    = (1 << (4 + 8)),       /* std. PATA signals follow... */
        DA2     = (1 << (2 + 8)),
        DA1     = (1 << (1 + 8)),
        DA0     = (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
        u8                      resp_flags;    /* 0 */
        u8                      reserved1;     /* 1 */
        u8                      ctl_flags;     /* 2 */
        /* len is length of taskfile in 64 bit words */
        u8                      len;           /* 3 */
        u8                      tag;           /* 4 */
        u8                      next_cpb_idx;  /* 5 */
        __le16                  reserved2;     /* 6-7 */
        __le16                  tf[12];        /* 8-31 */
        struct nv_adma_prd      aprd[5];       /* 32-111 */
        __le64                  next_aprd;     /* 112-119 */
        __le64                  reserved3;     /* 120-127 */
};


struct nv_adma_port_priv {
        struct nv_adma_cpb      *cpb;
        dma_addr_t              cpb_dma;
        struct nv_adma_prd      *aprd;
        dma_addr_t              aprd_dma;
        void __iomem            *ctl_block;
        void __iomem            *gen_block;
        void __iomem            *notifier_clear_block;
        u8                      flags;
        int                     last_issue_ncq;
};

struct nv_host_priv {
        unsigned long           type;
};

struct defer_queue {
        u32             defer_bits;
        unsigned int    head;
        unsigned int    tail;
        unsigned int    tag[ATA_MAX_QUEUE];
};

enum ncq_saw_flag_list {
        ncq_saw_d2h     = (1U << 0),
        ncq_saw_dmas    = (1U << 1),
        ncq_saw_sdb     = (1U << 2),
        ncq_saw_backout = (1U << 3),
};

struct nv_swncq_port_priv {
        struct ata_prd  *prd;    /* our SG list */
        dma_addr_t      prd_dma; /* and its DMA mapping */
        void __iomem    *sactive_block;
        void __iomem    *irq_block;
        void __iomem    *tag_block;
        u32             qc_active;

        unsigned int    last_issue_tag;

        /* FIFO circular queue to store deferred commands */
        struct defer_queue defer_queue;

        /* for NCQ interrupt analysis */
        u32             dhfis_bits;
        u32             dmafis_bits;
        u32             sdbfis_bits;

        unsigned int    ncq_flags;
};

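/* Port PORT's interrupt flag in the ADMA general control/status register:
   bit 19 for port 0, bit 31 (19 + 12) for port 1. */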
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
        GENERIC,
        NFORCE2,
        NFORCE3 = NFORCE2,      /* NF2 == NF3 as far as sata_nv is concerned */
        CK804,
        ADMA,
        SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

        { } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = nv_pci_tbl,
        .probe                  = nv_init_one,
#ifdef CONFIG_PM
        .suspend                = ata_pci_device_suspend,
        .resume                 = nv_pci_device_resume,
#endif
        .remove                 = ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = ATA_DEF_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = ata_scsi_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct scsi_host_template nv_adma_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .change_queue_depth     = ata_scsi_change_queue_depth,
        .can_queue              = NV_ADMA_MAX_CPBS,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = NV_ADMA_SGTBL_TOTAL_LEN,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = NV_ADMA_DMA_BOUNDARY,
        .slave_configure        = nv_adma_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct scsi_host_template nv_swncq_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .change_queue_depth     = ata_scsi_change_queue_depth,
        .can_queue              = ATA_MAX_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = nv_swncq_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static const struct ata_port_operations nv_generic_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = ata_bmdma_freeze,
        .thaw                   = ata_bmdma_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = nv_nf2_freeze,
        .thaw                   = nv_nf2_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = nv_ck804_freeze,
        .thaw                   = nv_ck804_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
        .host_stop              = nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = nv_adma_tf_read,
        .check_atapi_dma        = nv_adma_check_atapi_dma,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_adma_qc_prep,
        .qc_issue               = nv_adma_qc_issue,
        .freeze                 = nv_adma_freeze,
        .thaw                   = nv_adma_thaw,
        .error_handler          = nv_adma_error_handler,
        .post_internal_cmd      = nv_adma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = nv_adma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = nv_adma_port_start,
        .port_stop              = nv_adma_port_stop,
#ifdef CONFIG_PM
        .port_suspend           = nv_adma_port_suspend,
        .port_resume            = nv_adma_port_resume,
#endif
        .host_stop              = nv_adma_host_stop,
};

static const struct ata_port_operations nv_swncq_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_swncq_qc_prep,
        .qc_issue               = nv_swncq_qc_issue,
        .freeze                 = nv_mcp55_freeze,
        .thaw                   = nv_mcp55_thaw,
        .error_handler          = nv_swncq_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
#ifdef CONFIG_PM
        .port_suspend           = nv_swncq_port_suspend,
        .port_resume            = nv_swncq_port_resume,
#endif
        .port_start             = nv_swncq_port_start,
};

static const struct ata_port_info nv_port_info[] = {
        /* generic */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_generic_ops,
                .irq_handler    = nv_generic_interrupt,
        },
        /* nforce2/3 */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_nf2_ops,
                .irq_handler    = nv_nf2_interrupt,
        },
        /* ck804 */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_ck804_ops,
                .irq_handler    = nv_ck804_interrupt,
        },
        /* ADMA */
        {
                .sht            = &nv_adma_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_adma_ops,
                .irq_handler    = nv_adma_interrupt,
        },
        /* SWNCQ */
        {
                .sht            = &nv_swncq_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_NCQ,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_swncq_ops,
                .irq_handler    = nv_swncq_interrupt,
        },
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;
static int swncq_enabled;

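/* Drop the port back into legacy register mode: wait (up to 20 polls with
   50 ns delays) for the ADMA engine to go idle, clear the GO bit, then
   wait for the LEGACY status bit to assert. */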
static void nv_adma_register_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                return;

        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA IDLE, stat=0x%hx\n",
                        status);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        count = 0;
        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                         "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
                         status);

        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

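/* Switch the port from register mode to ADMA mode: set the GO bit and wait
   for LEGACY to clear and IDLE to assert. Must not be called once ATAPI
   setup has forced the port to legacy-only operation. */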
static void nv_adma_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
                return;

        WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        status = readw(mmio + NV_ADMA_STAT);
        while (((status & NV_ADMA_STAT_LEGACY) ||
              !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
                        status);

        pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        u64 bounce_limit;
        unsigned long segment_boundary;
        unsigned short sg_tablesize;
        int rc;
        int adma_enable;
        u32 current_reg, new_reg, config_mask;

        rc = ata_scsi_slave_config(sdev);

        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
                /*
                 * NVIDIA reports that ADMA mode does not support ATAPI commands.
                 * Therefore ATAPI commands are sent through the legacy interface.
                 * However, the legacy interface only supports 32-bit DMA.
                 * Restrict DMA parameters as required by the legacy interface
                 * when an ATAPI device is connected.
                 */
                bounce_limit = ATA_DMA_MASK;
                segment_boundary = ATA_DMA_BOUNDARY;
                /* Subtract 1 since an extra entry may be needed for padding, see
                   libata-scsi.c */
                sg_tablesize = LIBATA_MAX_PRD - 1;

                /* Since the legacy DMA engine is in use, we need to disable ADMA
                   on the port. */
                adma_enable = 0;
                nv_adma_register_mode(ap);
        } else {
                bounce_limit = *ap->dev->dma_mask;
                segment_boundary = NV_ADMA_DMA_BOUNDARY;
                sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
                adma_enable = 1;
        }

        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

        if (ap->port_no == 1)
                config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
                              NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
        else
                config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
                              NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

        if (adma_enable) {
                new_reg = current_reg | config_mask;
                pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
        } else {
                new_reg = current_reg & ~config_mask;
                pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
        }

        if (current_reg != new_reg)
                pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

        blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
        blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
        ata_port_printk(ap, KERN_INFO,
                "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
                (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
        return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
        /* Other than when internal or pass-through commands are executed,
           the only time this function will be called in ADMA mode will be
           if a command fails. In the failure case we don't care about going
           into register mode with ADMA commands pending, as the commands will
           all shortly be aborted anyway. We assume that NCQ commands are not
           issued via passthrough, which is the only way that switching into
           ADMA mode could abort outstanding commands. */
        nv_adma_register_mode(ap);

        ata_tf_read(ap, tf);
}

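/* Translate a taskfile into the CPB's register-write list. Each __le16
   entry is (shadow register index << 8) | data byte, optionally tagged
   with control bits such as WNB (wait-not-BSY) and CMDEND; unused slots
   are padded out with IGN entries. */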
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
        unsigned int idx = 0;

        if (tf->flags & ATA_TFLAG_ISADDR) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
                        cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature);
                } else
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature | WNB);

                cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->lbal);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->lbam);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->lbah);
        }

        if (tf->flags & ATA_TFLAG_DEVICE)
                cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

        cpb[idx++] = cpu_to_le16((ATA_REG_CMD   << 8) | tf->command | CMDEND);

        while (idx < 12)
                cpb[idx++] = cpu_to_le16(IGN);

        return idx;
}

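/* Inspect one CPB's response flags: complete the qc on DONE, or push an
   EH description and abort/freeze the port on any error flag. Returns
   nonzero when EH was invoked, so the caller stops scanning further CPBs. */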
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        u8 flags = pp->cpb[cpb_num].resp_flags;

        VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

        if (unlikely((force_err ||
                     flags & (NV_CPB_RESP_ATA_ERR |
                              NV_CPB_RESP_CMD_ERR |
                              NV_CPB_RESP_CPB_ERR)))) {
                struct ata_eh_info *ehi = &ap->link.eh_info;
                int freeze = 0;

                ata_ehi_clear_desc(ehi);
                __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
                if (flags & NV_CPB_RESP_ATA_ERR) {
                        ata_ehi_push_desc(ehi, "ATA error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CMD_ERR) {
                        ata_ehi_push_desc(ehi, "CMD error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CPB_ERR) {
                        ata_ehi_push_desc(ehi, "CPB error");
                        ehi->err_mask |= AC_ERR_SYSTEM;
                        freeze = 1;
                } else {
                        /* notifier error, but no error in CPB flags? */
                        ata_ehi_push_desc(ehi, "unknown");
                        ehi->err_mask |= AC_ERR_OTHER;
                        freeze = 1;
                }
                /* Kill all commands. EH will determine what actually failed. */
                if (freeze)
                        ata_port_freeze(ap);
                else
                        ata_port_abort(ap);
                return 1;
        }

        if (likely(flags & NV_CPB_RESP_DONE)) {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
                VPRINTK("CPB flags done, flags=0x%x\n", flags);
                if (likely(qc)) {
                        DPRINTK("Completing qc from tag %d\n", cpb_num);
                        ata_qc_complete(qc);
                } else {
                        struct ata_eh_info *ehi = &ap->link.eh_info;
                        /* Notifier bits set without a command may indicate the drive
                           is misbehaving. Raise host state machine violation on this
                           condition. */
                        ata_port_printk(ap, KERN_ERR,
                                        "notifier for tag %d with no cmd?\n",
                                        cpb_num);
                        ehi->err_mask |= AC_ERR_HSM;
                        ehi->action |= ATA_EH_SOFTRESET;
                        ata_port_freeze(ap);
                        return 1;
                }
        }
        return 0;
}

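/* Legacy-path interrupt helper: freeze the port on hotplug events, ignore
   interrupts that are not ours, and pass device interrupts with an active
   qc on to ata_host_intr(). */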
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

        /* freeze if hotplugged */
        if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
                ata_port_freeze(ap);
                return 1;
        }

        /* bail out if not our interrupt */
        if (!(irq_stat & NV_INT_DEV))
                return 0;

        /* DEV interrupt w/ no active qc? */
        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                ata_check_status(ap);
                return 1;
        }

        /* handle interrupt */
        return ata_host_intr(ap, qc);
}

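/* Controller-level interrupt handler for ADMA operation. For each port:
   fall back to the legacy handler while ADMA is disabled or the port is in
   register mode; otherwise ack the ADMA status (before reading any CPBs,
   so later completions raise a fresh interrupt), freeze on hotplug or
   controller errors, and walk the notifier bits to complete finished
   commands. */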
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        int i, handled = 0;
        u32 notifier_clears[2];

        spin_lock(&host->lock);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                notifier_clears[i] = 0;

                if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
                        struct nv_adma_port_priv *pp = ap->private_data;
                        void __iomem *mmio = pp->ctl_block;
                        u16 status;
                        u32 gen_ctl;
                        u32 notifier, notifier_error;

                        /* if ADMA is disabled, use standard ata interrupt handler */
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                handled += nv_host_intr(ap, irq_stat);
                                continue;
                        }

                        /* if in ATA register mode, check for standard interrupts */
                        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                if (ata_tag_valid(ap->link.active_tag))
                                        /* NV_INT_DEV indication seems unreliable at times
                                           at least in ADMA mode. Force it on always when a
                                           command is active, to prevent losing interrupts. */
                                        irq_stat |= NV_INT_DEV;
                                handled += nv_host_intr(ap, irq_stat);
                        }

                        notifier = readl(mmio + NV_ADMA_NOTIFIER);
                        notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                        notifier_clears[i] = notifier | notifier_error;

                        gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

                        if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
                            !notifier_error)
                                /* Nothing to do */
                                continue;

                        status = readw(mmio + NV_ADMA_STAT);

                        /* Clear status. Ensure the controller sees the clearing before we start
                           looking at any of the CPB statuses, so that any CPB completions after
                           this point in the handler will raise another interrupt. */
                        writew(status, mmio + NV_ADMA_STAT);
                        readw(mmio + NV_ADMA_STAT); /* flush posted write */
                        rmb();

                        handled++; /* irq handled if we got here */

                        /* freeze if hotplugged or controller error */
                        if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
                                               NV_ADMA_STAT_HOTUNPLUG |
                                               NV_ADMA_STAT_TIMEOUT |
                                               NV_ADMA_STAT_SERROR))) {
                                struct ata_eh_info *ehi = &ap->link.eh_info;

                                ata_ehi_clear_desc(ehi);
                                __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
                                if (status & NV_ADMA_STAT_TIMEOUT) {
                                        ehi->err_mask |= AC_ERR_SYSTEM;
                                        ata_ehi_push_desc(ehi, "timeout");
                                } else if (status & NV_ADMA_STAT_HOTPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hotplug");
                                } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hot unplug");
                                } else if (status & NV_ADMA_STAT_SERROR) {
                                        /* let libata analyze SError and figure out the cause */
                                        ata_ehi_push_desc(ehi, "SError");
                                } else
                                        ata_ehi_push_desc(ehi, "unknown");
                                ata_port_freeze(ap);
                                continue;
                        }

                        if (status & (NV_ADMA_STAT_DONE |
                                      NV_ADMA_STAT_CPBERR)) {
                                u32 check_commands;
                                int pos, error = 0;

                                if (ata_tag_valid(ap->link.active_tag))
                                        check_commands = 1 << ap->link.active_tag;
                                else
                                        check_commands = ap->link.sactive;

                                /* check CPBs for completed commands */
                                while ((pos = ffs(check_commands)) && !error) {
                                        pos--;
                                        error = nv_adma_check_cpb(ap, pos,
                                                notifier_error & (1 << pos));
                                        check_commands &= ~(1 << pos);
                                }
                        }
                }
        }

        if (notifier_clears[0] || notifier_clears[1]) {
                /* Note: Both notifier clear registers must be written
                   if either is set, even if one is zero, according to NVIDIA. */
                struct nv_adma_port_priv *pp = host->ports[0]->private_data;
                writel(notifier_clears[0], pp->notifier_clear_block);
                pp = host->ports[1]->private_data;
                writel(notifier_clears[1], pp->notifier_clear_block);
        }

        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_freeze(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* Disable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_thaw(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* Enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u32 notifier_clears[2];

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                ata_bmdma_irq_clear(ap);
                return;
        }

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* clear ADMA status */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* clear notifiers - note both ports need to be written with
           something even though we are only clearing on one */
        if (ap->port_no == 0) {
                notifier_clears[0] = 0xFFFFFFFF;
                notifier_clears[1] = 0;
        } else {
                notifier_clears[0] = 0;
                notifier_clears[1] = 0xFFFFFFFF;
        }
        pp = ap->host->ports[0]->private_data;
        writel(notifier_clears[0], pp->notifier_clear_block);
        pp = ap->host->ports[1]->private_data;
        writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct nv_adma_port_priv *pp;
        int rc;
        void *mem;
        dma_addr_t mem_dma;
        void __iomem *mmio;
        u16 tmp;

        VPRINTK("ENTER\n");

        rc = ata_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
               ap->port_no * NV_ADMA_PORT_SIZE;
        pp->ctl_block = mmio;
        pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
        pp->notifier_clear_block = pp->gen_block +
               NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

        mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
                                  &mem_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

        /*
         * First item in chunk of DMA memory:
         * 128-byte command parameter block (CPB)
         * one for each command tag
         */
        pp->cpb     = mem;
        pp->cpb_dma = mem_dma;

        writel(mem_dma & 0xFFFFFFFF,    mmio + NV_ADMA_CPB_BASE_LOW);
        writel((mem_dma >> 16) >> 16,   mmio + NV_ADMA_CPB_BASE_HIGH);

        mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
        mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

        /*
         * Second item: block of ADMA_SGTBL_LEN s/g entries
         */
        pp->aprd = mem;
        pp->aprd_dma = mem_dma;

        ap->private_data = pp;

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags = NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        VPRINTK("ENTER\n");
        writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        /* Go to register mode - clears GO */
        nv_adma_register_mode(ap);

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* disable interrupt, shut down port */
        writew(0, mmio + NV_ADMA_CTL);

        return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        /* set CPB block location */
        writel(pp->cpb_dma & 0xFFFFFFFF,        mmio + NV_ADMA_CPB_BASE_LOW);
        writel((pp->cpb_dma >> 16) >> 16,       mmio + NV_ADMA_CPB_BASE_HIGH);

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}
#endif

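/* In ADMA mode the legacy taskfile is memory-mapped within the port's ADMA
   register space, one 32-bit word per shadow register. */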
static void nv_adma_setup_port(struct ata_port *ap)
{
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        struct ata_ioports *ioport = &ap->ioaddr;

        VPRINTK("ENTER\n");

        mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

        ioport->cmd_addr        = mmio;
        ioport->data_addr       = mmio + (ATA_REG_DATA * 4);
        ioport->error_addr      =
        ioport->feature_addr    = mmio + (ATA_REG_ERR * 4);
        ioport->nsect_addr      = mmio + (ATA_REG_NSECT * 4);
        ioport->lbal_addr       = mmio + (ATA_REG_LBAL * 4);
        ioport->lbam_addr       = mmio + (ATA_REG_LBAM * 4);
        ioport->lbah_addr       = mmio + (ATA_REG_LBAH * 4);
        ioport->device_addr     = mmio + (ATA_REG_DEVICE * 4);
        ioport->status_addr     =
        ioport->command_addr    = mmio + (ATA_REG_STATUS * 4);
        ioport->altstatus_addr  =
        ioport->ctl_addr        = mmio + 0x20;
}

1294 static int nv_adma_host_init(struct ata_host *host)
1295 {
1296         struct pci_dev *pdev = to_pci_dev(host->dev);
1297         unsigned int i;
1298         u32 tmp32;
1299
1300         VPRINTK("ENTER\n");
1301
1302         /* enable ADMA on the ports */
1303         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1304         tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1305                  NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1306                  NV_MCP_SATA_CFG_20_PORT1_EN |
1307                  NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1308
1309         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1310
1311         for (i = 0; i < host->n_ports; i++)
1312                 nv_adma_setup_port(host->ports[i]);
1313
1314         return 0;
1315 }
1316
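/* Build one ADMA PRD entry.  The final scatter/gather element gets
 * END; every other element gets CONT except index 4, the last in-CPB
 * slot, where the chain instead continues through the external APRD
 * table referenced by next_aprd.
 */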
1317 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1318                               struct scatterlist *sg,
1319                               int idx,
1320                               struct nv_adma_prd *aprd)
1321 {
1322         u8 flags = 0;
1323         if (qc->tf.flags & ATA_TFLAG_WRITE)
1324                 flags |= NV_APRD_WRITE;
1325         if (idx == qc->n_elem - 1)
1326                 flags |= NV_APRD_END;
1327         else if (idx != 4)
1328                 flags |= NV_APRD_CONT;
1329
1330         aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1331         aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1332         aprd->flags = flags;
1333         aprd->packet_len = 0;
1334 }
1335
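/* The CPB itself holds five inline APRDs; longer scatterlists spill
 * into this tag's slice of the external APRD table, which is then
 * published through next_aprd.
 */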
1336 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1337 {
1338         struct nv_adma_port_priv *pp = qc->ap->private_data;
1339         struct nv_adma_prd *aprd;
1340         struct scatterlist *sg;
1341         unsigned int si;
1342
1343         VPRINTK("ENTER\n");
1344
1345         for_each_sg(qc->sg, sg, qc->n_elem, si) {
1346                 aprd = (si < 5) ? &cpb->aprd[si] :
1347                                &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1348                 nv_adma_fill_aprd(qc, sg, si, aprd);
1349         }
1350         if (si > 5)
1351                 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1352         else
1353                 cpb->next_aprd = cpu_to_le64(0);
1354 }
1355
1356 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1357 {
1358         struct nv_adma_port_priv *pp = qc->ap->private_data;
1359
1360         /* ADMA engine can only be used for non-ATAPI DMA commands,
1361            or interrupt-driven no-data commands. */
1362         if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1363            (qc->tf.flags & ATA_TFLAG_POLLING))
1364                 return 1;
1365
1366         if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1367            (qc->tf.protocol == ATA_PROT_NODATA))
1368                 return 0;
1369
1370         return 1;
1371 }
1372
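/* Fill in the CPB for a command.  The CPB is invalidated first and
 * only marked CPB_VALID after every field is written, with barriers
 * ordering the stores so the controller never fetches a half-built
 * CPB.
 */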
1373 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1374 {
1375         struct nv_adma_port_priv *pp = qc->ap->private_data;
1376         struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1377         u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1378                        NV_CPB_CTL_IEN;
1379
1380         if (nv_adma_use_reg_mode(qc)) {
1381                 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1382                         (qc->flags & ATA_QCFLAG_DMAMAP));
1383                 nv_adma_register_mode(qc->ap);
1384                 ata_qc_prep(qc);
1385                 return;
1386         }
1387
1388         cpb->resp_flags = NV_CPB_RESP_DONE;
1389         wmb();
1390         cpb->ctl_flags = 0;
1391         wmb();
1392
1393         cpb->len                = 3;
1394         cpb->tag                = qc->tag;
1395         cpb->next_cpb_idx       = 0;
1396
1397         /* turn on NCQ flags for NCQ commands */
1398         if (qc->tf.protocol == ATA_PROT_NCQ)
1399                 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1400
1401         VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1402
1403         nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1404
1405         if (qc->flags & ATA_QCFLAG_DMAMAP) {
1406                 nv_adma_fill_sg(qc, cpb);
1407                 ctl_flags |= NV_CPB_CTL_APRD_VALID;
1408         } else
1409                 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1410
1411         /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1412            until we are finished filling in all of the contents */
1413         wmb();
1414         cpb->ctl_flags = ctl_flags;
1415         wmb();
1416         cpb->resp_flags = 0;
1417 }
1418
1419 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1420 {
1421         struct nv_adma_port_priv *pp = qc->ap->private_data;
1422         void __iomem *mmio = pp->ctl_block;
1423         int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1424
1425         VPRINTK("ENTER\n");
1426
1427         /* We can't handle result taskfile with NCQ commands, since
1428            retrieving the taskfile switches us out of ADMA mode and would abort
1429            existing commands. */
1430         if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1431                      (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1432                 ata_dev_printk(qc->dev, KERN_ERR,
1433                         "NCQ w/ RESULT_TF not allowed\n");
1434                 return AC_ERR_SYSTEM;
1435         }
1436
1437         if (nv_adma_use_reg_mode(qc)) {
1438                 /* use ATA register mode */
1439                 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1440                 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1441                         (qc->flags & ATA_QCFLAG_DMAMAP));
1442                 nv_adma_register_mode(qc->ap);
1443                 return ata_qc_issue_prot(qc);
1444         } else
1445                 nv_adma_mode(qc->ap);
1446
1447         /* write append register, command tag in lower 8 bits
1448            and (number of cpbs to append -1) in top 8 bits */
1449         wmb();
1450
1451         if (curr_ncq != pp->last_issue_ncq) {
1452                 /* Seems to need some delay before switching between NCQ and
1453                    non-NCQ commands, else we get command timeouts and such. */
1454                 udelay(20);
1455                 pp->last_issue_ncq = curr_ncq;
1456         }
1457
1458         writew(qc->tag, mmio + NV_ADMA_APPEND);
1459
1460         DPRINTK("Issued tag %u\n", qc->tag);
1461
1462         return 0;
1463 }
1464
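/* Legacy interrupt handler: hand each enabled port's active non-polled
 * command to ata_host_intr(); otherwise just read the status register
 * to ack a possible stray interrupt.
 */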
1465 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1466 {
1467         struct ata_host *host = dev_instance;
1468         unsigned int i;
1469         unsigned int handled = 0;
1470         unsigned long flags;
1471
1472         spin_lock_irqsave(&host->lock, flags);
1473
1474         for (i = 0; i < host->n_ports; i++) {
1475                 struct ata_port *ap;
1476
1477                 ap = host->ports[i];
1478                 if (ap &&
1479                     !(ap->flags & ATA_FLAG_DISABLED)) {
1480                         struct ata_queued_cmd *qc;
1481
1482                         qc = ata_qc_from_tag(ap, ap->link.active_tag);
1483                         if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1484                                 handled += ata_host_intr(ap, qc);
1485                         else
1486                                 // No request pending?  Clear interrupt status
1487                                 // anyway, in case there's one pending.
1488                                 ap->ops->check_status(ap);
1489                 }
1490
1491         }
1492
1493         spin_unlock_irqrestore(&host->lock, flags);
1494
1495         return IRQ_RETVAL(handled);
1496 }
1497
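/* Shared NF2/CK804 interrupt body: irq_stat carries one 4-bit status
 * nibble per port, so it is shifted down as the ports are walked.
 */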
1498 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1499 {
1500         int i, handled = 0;
1501
1502         for (i = 0; i < host->n_ports; i++) {
1503                 struct ata_port *ap = host->ports[i];
1504
1505                 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1506                         handled += nv_host_intr(ap, irq_stat);
1507
1508                 irq_stat >>= NV_INT_PORT_SHIFT;
1509         }
1510
1511         return IRQ_RETVAL(handled);
1512 }
1513
1514 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1515 {
1516         struct ata_host *host = dev_instance;
1517         u8 irq_stat;
1518         irqreturn_t ret;
1519
1520         spin_lock(&host->lock);
1521         irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1522         ret = nv_do_interrupt(host, irq_stat);
1523         spin_unlock(&host->lock);
1524
1525         return ret;
1526 }
1527
1528 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1529 {
1530         struct ata_host *host = dev_instance;
1531         u8 irq_stat;
1532         irqreturn_t ret;
1533
1534         spin_lock(&host->lock);
1535         irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1536         ret = nv_do_interrupt(host, irq_stat);
1537         spin_unlock(&host->lock);
1538
1539         return ret;
1540 }
1541
1542 static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
1543 {
1544         if (sc_reg > SCR_CONTROL)
1545                 return -EINVAL;
1546
1547         *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1548         return 0;
1549 }
1550
1551 static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
1552 {
1553         if (sc_reg > SCR_CONTROL)
1554                 return -EINVAL;
1555
1556         iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
1557         return 0;
1558 }
1559
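/* nForce2/3 freeze/thaw toggle the per-port bits of the interrupt
 * enable register that lives in SCR space; thaw also acks any latched
 * status before unmasking.
 */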
1560 static void nv_nf2_freeze(struct ata_port *ap)
1561 {
1562         void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1563         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1564         u8 mask;
1565
1566         mask = ioread8(scr_addr + NV_INT_ENABLE);
1567         mask &= ~(NV_INT_ALL << shift);
1568         iowrite8(mask, scr_addr + NV_INT_ENABLE);
1569 }
1570
1571 static void nv_nf2_thaw(struct ata_port *ap)
1572 {
1573         void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1574         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1575         u8 mask;
1576
1577         iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1578
1579         mask = ioread8(scr_addr + NV_INT_ENABLE);
1580         mask |= (NV_INT_MASK << shift);
1581         iowrite8(mask, scr_addr + NV_INT_ENABLE);
1582 }
1583
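/* The CK804 variants are structurally identical but use the interrupt
 * status/enable registers in BAR5 MMIO space rather than SCR space.
 */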
1584 static void nv_ck804_freeze(struct ata_port *ap)
1585 {
1586         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1587         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1588         u8 mask;
1589
1590         mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1591         mask &= ~(NV_INT_ALL << shift);
1592         writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1593 }
1594
1595 static void nv_ck804_thaw(struct ata_port *ap)
1596 {
1597         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1598         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1599         u8 mask;
1600
1601         writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1602
1603         mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1604         mask |= (NV_INT_MASK << shift);
1605         writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1606 }
1607
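/* MCP55 widens the scheme to 32-bit status/enable registers and also
 * chains to the generic BMDMA freeze/thaw helpers.
 */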
1608 static void nv_mcp55_freeze(struct ata_port *ap)
1609 {
1610         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1611         int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1612         u32 mask;
1613
1614         writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1615
1616         mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1617         mask &= ~(NV_INT_ALL_MCP55 << shift);
1618         writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1619         ata_bmdma_freeze(ap);
1620 }
1621
1622 static void nv_mcp55_thaw(struct ata_port *ap)
1623 {
1624         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1625         int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1626         u32 mask;
1627
1628         writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1629
1630         mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1631         mask |= (NV_INT_MASK_MCP55 << shift);
1632         writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1633         ata_bmdma_thaw(ap);
1634 }
1635
1636 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1637                         unsigned long deadline)
1638 {
1639         unsigned int dummy;
1640
1641         /* SATA hardreset fails to retrieve proper device signature on
1642          * some controllers.  Don't classify on hardreset.  For more
1643          * info, see http://bugzilla.kernel.org/show_bug.cgi?id=3352
1644          */
1645         return sata_std_hardreset(link, &dummy, deadline);
1646 }
1647
1648 static void nv_error_handler(struct ata_port *ap)
1649 {
1650         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1651                            nv_hardreset, ata_std_postreset);
1652 }
1653
1654 static void nv_adma_error_handler(struct ata_port *ap)
1655 {
1656         struct nv_adma_port_priv *pp = ap->private_data;
1657         if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1658                 void __iomem *mmio = pp->ctl_block;
1659                 int i;
1660                 u16 tmp;
1661
1662                 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1663                         u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1664                         u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1665                         u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1666                         u32 status = readw(mmio + NV_ADMA_STAT);
1667                         u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1668                         u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1669
1670                         ata_port_printk(ap, KERN_ERR,
1671                                 "EH in ADMA mode, notifier 0x%X "
1672                                 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1673                                 "next cpb count 0x%X next cpb idx 0x%x\n",
1674                                 notifier, notifier_error, gen_ctl, status,
1675                                 cpb_count, next_cpb_idx);
1676
1677                         for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1678                                 struct nv_adma_cpb *cpb = &pp->cpb[i];
1679                                 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1680                                     ap->link.sactive & (1 << i))
1681                                         ata_port_printk(ap, KERN_ERR,
1682                                                 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1683                                                 i, cpb->ctl_flags, cpb->resp_flags);
1684                         }
1685                 }
1686
1687                 /* Push us back into port register mode for error handling. */
1688                 nv_adma_register_mode(ap);
1689
1690                 /* Mark all of the CPBs as invalid to prevent them from
1691                    being executed */
1692                 for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1693                         pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1694
1695                 /* clear CPB fetch count */
1696                 writew(0, mmio + NV_ADMA_CPB_COUNT);
1697
1698                 /* Reset channel */
1699                 tmp = readw(mmio + NV_ADMA_CTL);
1700                 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1701                 readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1702                 udelay(1);
1703                 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1704                 readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1705         }
1706
1707         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1708                            nv_hardreset, ata_std_postreset);
1709 }
1710
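/* The SWNCQ defer queue is a small ring of tags plus a bitmap:
 * commands that cannot be issued while others are in flight are parked
 * here and re-issued later from interrupt context.
 */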
1711 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1712 {
1713         struct nv_swncq_port_priv *pp = ap->private_data;
1714         struct defer_queue *dq = &pp->defer_queue;
1715
1716         /* queue is full */
1717         WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1718         dq->defer_bits |= (1 << qc->tag);
1719         dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1720 }
1721
1722 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1723 {
1724         struct nv_swncq_port_priv *pp = ap->private_data;
1725         struct defer_queue *dq = &pp->defer_queue;
1726         unsigned int tag;
1727
1728         if (dq->head == dq->tail)       /* null queue */
1729                 return NULL;
1730
1731         tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1732         dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1733         WARN_ON(!(dq->defer_bits & (1 << tag)));
1734         dq->defer_bits &= ~(1 << tag);
1735
1736         return ata_qc_from_tag(ap, tag);
1737 }
1738
1739 static void nv_swncq_fis_reinit(struct ata_port *ap)
1740 {
1741         struct nv_swncq_port_priv *pp = ap->private_data;
1742
1743         pp->dhfis_bits = 0;
1744         pp->dmafis_bits = 0;
1745         pp->sdbfis_bits = 0;
1746         pp->ncq_flags = 0;
1747 }
1748
1749 static void nv_swncq_pp_reinit(struct ata_port *ap)
1750 {
1751         struct nv_swncq_port_priv *pp = ap->private_data;
1752         struct defer_queue *dq = &pp->defer_queue;
1753
1754         dq->head = 0;
1755         dq->tail = 0;
1756         dq->defer_bits = 0;
1757         pp->qc_active = 0;
1758         pp->last_issue_tag = ATA_TAG_POISON;
1759         nv_swncq_fis_reinit(ap);
1760 }
1761
1762 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1763 {
1764         struct nv_swncq_port_priv *pp = ap->private_data;
1765
1766         writew(fis, pp->irq_block);
1767 }
1768
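/* ata_bmdma_stop() only dereferences qc->ap, so a dummy qc on the
 * stack is enough to stop the engine outside the context of a real
 * command.
 */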
1769 static void __ata_bmdma_stop(struct ata_port *ap)
1770 {
1771         struct ata_queued_cmd qc;
1772
1773         qc.ap = ap;
1774         ata_bmdma_stop(&qc);
1775 }
1776
1777 static void nv_swncq_ncq_stop(struct ata_port *ap)
1778 {
1779         struct nv_swncq_port_priv *pp = ap->private_data;
1780         unsigned int i;
1781         u32 sactive;
1782         u32 done_mask;
1783
1784         ata_port_printk(ap, KERN_ERR,
1785                         "EH in SWNCQ mode, QC:qc_active 0x%X sactive 0x%X\n",
1786                         ap->qc_active, ap->link.sactive);
1787         ata_port_printk(ap, KERN_ERR,
1788                 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1789                 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1790                 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1791                 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1792
1793         ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1794                         ap->ops->check_status(ap),
1795                         ioread8(ap->ioaddr.error_addr));
1796
1797         sactive = readl(pp->sactive_block);
1798         done_mask = pp->qc_active ^ sactive;
1799
1800         ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1801         for (i = 0; i < ATA_MAX_QUEUE; i++) {
1802                 u8 err = 0;
1803                 if (pp->qc_active & (1 << i))
1804                         err = 0;
1805                 else if (done_mask & (1 << i))
1806                         err = 1;
1807                 else
1808                         continue;
1809
1810                 ata_port_printk(ap, KERN_ERR,
1811                                 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1812                                 (pp->dhfis_bits >> i) & 0x1,
1813                                 (pp->dmafis_bits >> i) & 0x1,
1814                                 (pp->sdbfis_bits >> i) & 0x1,
1815                                 (sactive >> i) & 0x1,
1816                                 (err ? "error! tag doesn't exist" : " "));
1817         }
1818
1819         nv_swncq_pp_reinit(ap);
1820         ap->ops->irq_clear(ap);
1821         __ata_bmdma_stop(ap);
1822         nv_swncq_irq_clear(ap, 0xffff);
1823 }
1824
1825 static void nv_swncq_error_handler(struct ata_port *ap)
1826 {
1827         struct ata_eh_context *ehc = &ap->link.eh_context;
1828
1829         if (ap->link.sactive) {
1830                 nv_swncq_ncq_stop(ap);
1831                 ehc->i.action |= ATA_EH_HARDRESET;
1832         }
1833
1834         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1835                            nv_hardreset, ata_std_postreset);
1836 }
1837
1838 #ifdef CONFIG_PM
1839 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1840 {
1841         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1842         u32 tmp;
1843
1844         /* clear irq */
1845         writel(~0, mmio + NV_INT_STATUS_MCP55);
1846
1847         /* disable irq */
1848         writel(0, mmio + NV_INT_ENABLE_MCP55);
1849
1850         /* disable swncq */
1851         tmp = readl(mmio + NV_CTL_MCP55);
1852         tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1853         writel(tmp, mmio + NV_CTL_MCP55);
1854
1855         return 0;
1856 }
1857
1858 static int nv_swncq_port_resume(struct ata_port *ap)
1859 {
1860         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1861         u32 tmp;
1862
1863         /* clear irq */
1864         writel(~0, mmio + NV_INT_STATUS_MCP55);
1865
1866         /* enable irq */
1867         writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1868
1869         /* enable swncq */
1870         tmp = readl(mmio + NV_CTL_MCP55);
1871         writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1872
1873         return 0;
1874 }
1875 #endif
1876
1877 static void nv_swncq_host_init(struct ata_host *host)
1878 {
1879         u32 tmp;
1880         void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1881         struct pci_dev *pdev = to_pci_dev(host->dev);
1882         u8 regval;
1883
1884         /* disable ECO 398 */
1885         pci_read_config_byte(pdev, 0x7f, &regval);
1886         regval &= ~(1 << 7);
1887         pci_write_config_byte(pdev, 0x7f, regval);
1888
1889         /* enable swncq */
1890         tmp = readl(mmio + NV_CTL_MCP55);
1891         VPRINTK("HOST_CTL:0x%X\n", tmp);
1892         writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1893
1894         /* enable irq intr */
1895         tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1896         VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1897         writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1898
1899         /* clear port irq */
1900         writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1901 }
1902
1903 static int nv_swncq_slave_config(struct scsi_device *sdev)
1904 {
1905         struct ata_port *ap = ata_shost_to_port(sdev->host);
1906         struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1907         struct ata_device *dev;
1908         int rc;
1909         u8 rev;
1910         u8 check_maxtor = 0;
1911         unsigned char model_num[ATA_ID_PROD_LEN + 1];
1912
1913         rc = ata_scsi_slave_config(sdev);
1914         if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1915                 /* Not a proper libata device, ignore */
1916                 return rc;
1917
1918         dev = &ap->link.device[sdev->id];
1919         if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1920                 return rc;
1921
1922         /* if MCP51 and Maxtor, then disable ncq */
1923         if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1924                 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1925                 check_maxtor = 1;
1926
1927         /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1928         if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1929                 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1930                 pci_read_config_byte(pdev, 0x8, &rev);
1931                 if (rev <= 0xa2)
1932                         check_maxtor = 1;
1933         }
1934
1935         if (!check_maxtor)
1936                 return rc;
1937
1938         ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1939
1940         if (strncmp(model_num, "Maxtor", 6) == 0) {
1941                 ata_scsi_change_queue_depth(sdev, 1);
1942                 ata_dev_printk(dev, KERN_NOTICE,
1943                         "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1944         }
1945
1946         return rc;
1947 }
1948
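/* Allocate one PRD table per NCQ tag and record where this port's
 * SActive, interrupt-status and tag registers live.
 */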
1949 static int nv_swncq_port_start(struct ata_port *ap)
1950 {
1951         struct device *dev = ap->host->dev;
1952         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1953         struct nv_swncq_port_priv *pp;
1954         int rc;
1955
1956         rc = ata_port_start(ap);
1957         if (rc)
1958                 return rc;
1959
1960         pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1961         if (!pp)
1962                 return -ENOMEM;
1963
1964         pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1965                                       &pp->prd_dma, GFP_KERNEL);
1966         if (!pp->prd)
1967                 return -ENOMEM;
1968         memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1969
1970         ap->private_data = pp;
1971         pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1972         pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1973         pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1974
1975         return 0;
1976 }
1977
1978 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1979 {
1980         if (qc->tf.protocol != ATA_PROT_NCQ) {
1981                 ata_qc_prep(qc);
1982                 return;
1983         }
1984
1985         if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1986                 return;
1987
1988         nv_swncq_fill_sg(qc);
1989 }
1990
1991 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1992 {
1993         struct ata_port *ap = qc->ap;
1994         struct scatterlist *sg;
1995         struct nv_swncq_port_priv *pp = ap->private_data;
1996         struct ata_prd *prd;
1997         unsigned int si, idx;
1998
1999         prd = pp->prd + ATA_MAX_PRD * qc->tag;
2000
2001         idx = 0;
2002         for_each_sg(qc->sg, sg, qc->n_elem, si) {
2003                 u32 addr, offset;
2004                 u32 sg_len, len;
2005
2006                 addr = (u32)sg_dma_address(sg);
2007                 sg_len = sg_dma_len(sg);
2008
2009                 while (sg_len) {
2010                         offset = addr & 0xffff;
2011                         len = sg_len;
2012                         if ((offset + sg_len) > 0x10000)
2013                                 len = 0x10000 - offset;
2014
2015                         prd[idx].addr = cpu_to_le32(addr);
2016                         prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2017
2018                         idx++;
2019                         sg_len -= len;
2020                         addr += len;
2021                 }
2022         }
2023
2024         prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2025 }
2026
2027 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2028                                           struct ata_queued_cmd *qc)
2029 {
2030         struct nv_swncq_port_priv *pp = ap->private_data;
2031
2032         if (qc == NULL)
2033                 return 0;
2034
2035         DPRINTK("Enter\n");
2036
2037         writel((1 << qc->tag), pp->sactive_block);
2038         pp->last_issue_tag = qc->tag;
2039         pp->dhfis_bits &= ~(1 << qc->tag);
2040         pp->dmafis_bits &= ~(1 << qc->tag);
2041         pp->qc_active |= (0x1 << qc->tag);
2042
2043         ap->ops->tf_load(ap, &qc->tf);   /* load tf registers */
2044         ap->ops->exec_command(ap, &qc->tf);
2045
2046         DPRINTK("Issued tag %u\n", qc->tag);
2047
2048         return 0;
2049 }
2050
2051 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2052 {
2053         struct ata_port *ap = qc->ap;
2054         struct nv_swncq_port_priv *pp = ap->private_data;
2055
2056         if (qc->tf.protocol != ATA_PROT_NCQ)
2057                 return ata_qc_issue_prot(qc);
2058
2059         DPRINTK("Enter\n");
2060
2061         if (!pp->qc_active)
2062                 nv_swncq_issue_atacmd(ap, qc);
2063         else
2064                 nv_swncq_qc_to_dq(ap, qc);      /* add qc to defer queue */
2065
2066         return 0;
2067 }
2068
2069 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2070 {
2071         u32 serror;
2072         struct ata_eh_info *ehi = &ap->link.eh_info;
2073
2074         ata_ehi_clear_desc(ehi);
2075
2076         /* AHCI needs SError cleared; otherwise, it might lock up */
2077         sata_scr_read(&ap->link, SCR_ERROR, &serror);
2078         sata_scr_write(&ap->link, SCR_ERROR, serror);
2079
2080         /* analyze @fis */
2081         if (fis & NV_SWNCQ_IRQ_ADDED)
2082                 ata_ehi_push_desc(ehi, "hot plug");
2083         else if (fis & NV_SWNCQ_IRQ_REMOVED)
2084                 ata_ehi_push_desc(ehi, "hot unplug");
2085
2086         ata_ehi_hotplugged(ehi);
2087
2088         /* okay, let's hand over to EH */
2089         ehi->serror |= serror;
2090
2091         ata_port_freeze(ap);
2092 }
2093
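/* Handle a Set Device Bits FIS.  Commands whose bits have dropped out
 * of SActive are completed; if a D2H register FIS appears to have been
 * lost, the last issued command is re-issued, otherwise the next
 * deferred command (if any) is started.  Returns the number of
 * completed commands, or a negative value to request error handling.
 */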
2094 static int nv_swncq_sdbfis(struct ata_port *ap)
2095 {
2096         struct ata_queued_cmd *qc;
2097         struct nv_swncq_port_priv *pp = ap->private_data;
2098         struct ata_eh_info *ehi = &ap->link.eh_info;
2099         u32 sactive;
2100         int nr_done = 0;
2101         u32 done_mask;
2102         int i;
2103         u8 host_stat;
2104         u8 lack_dhfis = 0;
2105
2106         host_stat = ap->ops->bmdma_status(ap);
2107         if (unlikely(host_stat & ATA_DMA_ERR)) {
2108                 /* error when transferring data to/from memory */
2109                 ata_ehi_clear_desc(ehi);
2110                 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2111                 ehi->err_mask |= AC_ERR_HOST_BUS;
2112                 ehi->action |= ATA_EH_SOFTRESET;
2113                 return -EINVAL;
2114         }
2115
2116         ap->ops->irq_clear(ap);
2117         __ata_bmdma_stop(ap);
2118
2119         sactive = readl(pp->sactive_block);
2120         done_mask = pp->qc_active ^ sactive;
2121
2122         if (unlikely(done_mask & sactive)) {
2123                 ata_ehi_clear_desc(ehi);
2124                 ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
2125                                   "(%08x->%08x)", pp->qc_active, sactive);
2126                 ehi->err_mask |= AC_ERR_HSM;
2127                 ehi->action |= ATA_EH_HARDRESET;
2128                 return -EINVAL;
2129         }
2130         for (i = 0; i < ATA_MAX_QUEUE; i++) {
2131                 if (!(done_mask & (1 << i)))
2132                         continue;
2133
2134                 qc = ata_qc_from_tag(ap, i);
2135                 if (qc) {
2136                         ata_qc_complete(qc);
2137                         pp->qc_active &= ~(1 << i);
2138                         pp->dhfis_bits &= ~(1 << i);
2139                         pp->dmafis_bits &= ~(1 << i);
2140                         pp->sdbfis_bits |= (1 << i);
2141                         nr_done++;
2142                 }
2143         }
2144
2145         if (!ap->qc_active) {
2146                 DPRINTK("over\n");
2147                 nv_swncq_pp_reinit(ap);
2148                 return nr_done;
2149         }
2150
2151         if (pp->qc_active & pp->dhfis_bits)
2152                 return nr_done;
2153
2154         if ((pp->ncq_flags & ncq_saw_backout) ||
2155             (pp->qc_active ^ pp->dhfis_bits))
2156                 /* if the controller can't get a Device-to-Host register FIS,
2157                  * the driver needs to reissue the command.
2158                  */
2159                 lack_dhfis = 1;
2160
2161         DPRINTK("id 0x%x QC: qc_active 0x%x, "
2162                 "SWNCQ:qc_active 0x%X defer_bits %X "
2163                 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2164                 ap->print_id, ap->qc_active, pp->qc_active,
2165                 pp->defer_queue.defer_bits, pp->dhfis_bits,
2166                 pp->dmafis_bits, pp->last_issue_tag);
2167
2168         nv_swncq_fis_reinit(ap);
2169
2170         if (lack_dhfis) {
2171                 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2172                 nv_swncq_issue_atacmd(ap, qc);
2173                 return nr_done;
2174         }
2175
2176         if (pp->defer_queue.defer_bits) {
2177                 /* send deferral queue command */
2178                 qc = nv_swncq_qc_from_dq(ap);
2179                 WARN_ON(qc == NULL);
2180                 nv_swncq_issue_atacmd(ap, qc);
2181         }
2182
2183         return nr_done;
2184 }
2185
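/* Read back the tag of the command the controller is currently acting
 * on; the hardware reports it in bits 6:2 of the tag register.
 */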
2186 static inline u32 nv_swncq_tag(struct ata_port *ap)
2187 {
2188         struct nv_swncq_port_priv *pp = ap->private_data;
2189         u32 tag;
2190
2191         tag = readb(pp->tag_block) >> 2;
2192         return (tag & 0x1f);
2193 }
2194
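/* A DMA Setup FIS arrived: point the BMDMA engine at the PRD table of
 * the tag the controller selected, program the transfer direction and
 * start the transfer.
 */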
2195 static int nv_swncq_dmafis(struct ata_port *ap)
2196 {
2197         struct ata_queued_cmd *qc;
2198         unsigned int rw;
2199         u8 dmactl;
2200         u32 tag;
2201         struct nv_swncq_port_priv *pp = ap->private_data;
2202
2203         __ata_bmdma_stop(ap);
2204         tag = nv_swncq_tag(ap);
2205
2206         DPRINTK("dma setup tag 0x%x\n", tag);
2207         qc = ata_qc_from_tag(ap, tag);
2208
2209         if (unlikely(!qc))
2210                 return 0;
2211
2212         rw = qc->tf.flags & ATA_TFLAG_WRITE;
2213
2214         /* load PRD table addr. */
2215         iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2216                   ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2217
2218         /* specify data direction, triple-check start bit is clear */
2219         dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2220         dmactl &= ~ATA_DMA_WR;
2221         if (!rw)
2222                 dmactl |= ATA_DMA_WR;
2223
2224         iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2225
2226         return 1;
2227 }
2228
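/* Per-port SWNCQ interrupt dispatcher: ack the FIS bits, then handle
 * hotplug, device errors, SDB completion, D2H register FIS receipt and
 * DMA setup in turn, freezing the port on any protocol violation.
 */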
2229 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2230 {
2231         struct nv_swncq_port_priv *pp = ap->private_data;
2232         struct ata_queued_cmd *qc;
2233         struct ata_eh_info *ehi = &ap->link.eh_info;
2234         u32 serror;
2235         u8 ata_stat;
2236         int rc = 0;
2237
2238         ata_stat = ap->ops->check_status(ap);
2239         nv_swncq_irq_clear(ap, fis);
2240         if (!fis)
2241                 return;
2242
2243         if (ap->pflags & ATA_PFLAG_FROZEN)
2244                 return;
2245
2246         if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2247                 nv_swncq_hotplug(ap, fis);
2248                 return;
2249         }
2250
2251         if (!pp->qc_active)
2252                 return;
2253
2254         if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
2255                 return;
2256         ap->ops->scr_write(ap, SCR_ERROR, serror);
2257
2258         if (ata_stat & ATA_ERR) {
2259                 ata_ehi_clear_desc(ehi);
2260                 ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2261                 ehi->err_mask |= AC_ERR_DEV;
2262                 ehi->serror |= serror;
2263                 ehi->action |= ATA_EH_SOFTRESET;
2264                 ata_port_freeze(ap);
2265                 return;
2266         }
2267
2268         if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2269                 /* On a backout IRQ the driver must reissue
2270                  * the command some time later.
2271                  */
2272                 pp->ncq_flags |= ncq_saw_backout;
2273         }
2274
2275         if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2276                 pp->ncq_flags |= ncq_saw_sdb;
2277                 DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2278                         "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2279                         ap->print_id, pp->qc_active, pp->dhfis_bits,
2280                         pp->dmafis_bits, readl(pp->sactive_block));
2281                 rc = nv_swncq_sdbfis(ap);
2282                 if (rc < 0)
2283                         goto irq_error;
2284         }
2285
2286         if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2287                 /* The interrupt indicates the new command
2288                  * was transmitted correctly to the drive.
2289                  */
2290                 pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2291                 pp->ncq_flags |= ncq_saw_d2h;
2292                 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2293                         ata_ehi_push_desc(ehi, "illegal fis transaction");
2294                         ehi->err_mask |= AC_ERR_HSM;
2295                         ehi->action |= ATA_EH_HARDRESET;
2296                         goto irq_error;
2297                 }
2298
2299                 if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2300                     !(pp->ncq_flags & ncq_saw_dmas)) {
2301                         ata_stat = ap->ops->check_status(ap);
2302                         if (ata_stat & ATA_BUSY)
2303                                 goto irq_exit;
2304
2305                         if (pp->defer_queue.defer_bits) {
2306                                 DPRINTK("send next command\n");
2307                                 qc = nv_swncq_qc_from_dq(ap);
2308                                 nv_swncq_issue_atacmd(ap, qc);
2309                         }
2310                 }
2311         }
2312
2313         if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2314                 /* program the dma controller with appropriate PRD buffers
2315                  * and start the DMA transfer for requested command.
2316                  */
2317                 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2318                 pp->ncq_flags |= ncq_saw_dmas;
2319                 rc = nv_swncq_dmafis(ap);
2320         }
2321
2322 irq_exit:
2323         return;
2324 irq_error:
2325         ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2326         ata_port_freeze(ap);
2327         return;
2328 }
2329
2330 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2331 {
2332         struct ata_host *host = dev_instance;
2333         unsigned int i;
2334         unsigned int handled = 0;
2335         unsigned long flags;
2336         u32 irq_stat;
2337
2338         spin_lock_irqsave(&host->lock, flags);
2339
2340         irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2341
2342         for (i = 0; i < host->n_ports; i++) {
2343                 struct ata_port *ap = host->ports[i];
2344
2345                 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2346                         if (ap->link.sactive) {
2347                                 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2348                                 handled = 1;
2349                         } else {
2350                                 if (irq_stat)   /* preserve the hotplug bits */
2351                                         nv_swncq_irq_clear(ap, 0xfff0);
2352
2353                                 handled += nv_host_intr(ap, (u8)irq_stat);
2354                         }
2355                 }
2356                 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2357         }
2358
2359         spin_unlock_irqrestore(&host->lock, flags);
2360
2361         return IRQ_RETVAL(handled);
2362 }
2363
2364 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2365 {
2366         static int printed_version;
2367         const struct ata_port_info *ppi[] = { NULL, NULL };
2368         struct ata_host *host;
2369         struct nv_host_priv *hpriv;
2370         int rc;
2371         u32 bar;
2372         void __iomem *base;
2373         unsigned long type = ent->driver_data;
2374
2375         // Make sure this is a SATA controller by counting the number of bars
2376         // (NVIDIA SATA controllers will always have six bars).  Otherwise,
2377         // it's an IDE controller and we ignore it.
2378         for (bar = 0; bar < 6; bar++)
2379                 if (pci_resource_start(pdev, bar) == 0)
2380                         return -ENODEV;
2381
2382         if (!printed_version++)
2383                 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2384
2385         rc = pcim_enable_device(pdev);
2386         if (rc)
2387                 return rc;
2388
2389         /* determine type and allocate host */
2390         if (type == CK804 && adma_enabled) {
2391                 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2392                 type = ADMA;
2393         }
2394
2395         if (type == SWNCQ) {
2396                 if (swncq_enabled)
2397                         dev_printk(KERN_NOTICE, &pdev->dev,
2398                                    "Using SWNCQ mode\n");
2399                 else
2400                         type = GENERIC;
2401         }
2402
2403         ppi[0] = &nv_port_info[type];
2404         rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
2405         if (rc)
2406                 return rc;
2407
2408         hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2409         if (!hpriv)
2410                 return -ENOMEM;
2411         hpriv->type = type;
2412         host->private_data = hpriv;
2413
2414         /* set 64bit dma masks, may fail */
2415         if (type == ADMA) {
2416                 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
2417                         pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2418         }
2419
2420         /* request and iomap NV_MMIO_BAR */
2421         rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2422         if (rc)
2423                 return rc;
2424
2425         /* configure SCR access */
2426         base = host->iomap[NV_MMIO_BAR];
2427         host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2428         host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2429
2430         /* enable SATA space for CK804 */
2431         if (type >= CK804) {
2432                 u8 regval;
2433
2434                 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2435                 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2436                 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2437         }
2438
2439         /* init ADMA */
2440         if (type == ADMA) {
2441                 rc = nv_adma_host_init(host);
2442                 if (rc)
2443                         return rc;
2444         } else if (type == SWNCQ)
2445                 nv_swncq_host_init(host);
2446
2447         pci_set_master(pdev);
2448         return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
2449                                  IRQF_SHARED, ppi[0]->sht);
2450 }
2451
2452 #ifdef CONFIG_PM
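/* Chipset configuration done through PCI config space is lost across a
 * suspend, so redo the CK804 SATA-space enable and the per-port ADMA
 * enables before resuming the ports.
 */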
2453 static int nv_pci_device_resume(struct pci_dev *pdev)
2454 {
2455         struct ata_host *host = dev_get_drvdata(&pdev->dev);
2456         struct nv_host_priv *hpriv = host->private_data;
2457         int rc;
2458
2459         rc = ata_pci_device_do_resume(pdev);
2460         if (rc)
2461                 return rc;
2462
2463         if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2464                 if (hpriv->type >= CK804) {
2465                         u8 regval;
2466
2467                         pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2468                         regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2469                         pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2470                 }
2471                 if (hpriv->type == ADMA) {
2472                         u32 tmp32;
2473                         struct nv_adma_port_priv *pp;
2474                         /* enable/disable ADMA on the ports appropriately */
2475                         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2476
2477                         pp = host->ports[0]->private_data;
2478                         if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2479                                 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2480                                            NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2481                         else
2482                                 tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2483                                            NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2484                         pp = host->ports[1]->private_data;
2485                         if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2486                                 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2487                                            NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2488                         else
2489                                 tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2490                                            NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2491
2492                         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2493                 }
2494         }
2495
2496         ata_host_resume(host);
2497
2498         return 0;
2499 }
2500 #endif
2501
2502 static void nv_ck804_host_stop(struct ata_host *host)
2503 {
2504         struct pci_dev *pdev = to_pci_dev(host->dev);
2505         u8 regval;
2506
2507         /* disable SATA space for CK804 */
2508         pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2509         regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2510         pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2511 }
2512
2513 static void nv_adma_host_stop(struct ata_host *host)
2514 {
2515         struct pci_dev *pdev = to_pci_dev(host->dev);
2516         u32 tmp32;
2517
2518         /* disable ADMA on the ports */
2519         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2520         tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2521                    NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2522                    NV_MCP_SATA_CFG_20_PORT1_EN |
2523                    NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2524
2525         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2526
2527         nv_ck804_host_stop(host);
2528 }
2529
2530 static int __init nv_init(void)
2531 {
2532         return pci_register_driver(&nv_pci_driver);
2533 }
2534
2535 static void __exit nv_exit(void)
2536 {
2537         pci_unregister_driver(&nv_pci_driver);
2538 }
2539
2540 module_init(nv_init);
2541 module_exit(nv_exit);
2542 module_param_named(adma, adma_enabled, bool, 0444);
2543 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
2544 module_param_named(swncq, swncq_enabled, bool, 0444);
2545 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)");
2546