/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME                        "sata_nv"
#define DRV_VERSION                     "3.5"

#define NV_ADMA_DMA_BOUNDARY            0xffffffffUL

enum {
        NV_MMIO_BAR                     = 5,

        NV_PORTS                        = 2,
        NV_PIO_MASK                     = 0x1f,
        NV_MWDMA_MASK                   = 0x07,
        NV_UDMA_MASK                    = 0x7f,
        NV_PORT0_SCR_REG_OFFSET         = 0x00,
        NV_PORT1_SCR_REG_OFFSET         = 0x40,

        /* INT_STATUS/ENABLE */
        NV_INT_STATUS                   = 0x10,
        NV_INT_ENABLE                   = 0x11,
        NV_INT_STATUS_CK804             = 0x440,
        NV_INT_ENABLE_CK804             = 0x441,

        /* INT_STATUS/ENABLE bits */
        NV_INT_DEV                      = 0x01,
        NV_INT_PM                       = 0x02,
        NV_INT_ADDED                    = 0x04,
        NV_INT_REMOVED                  = 0x08,

        NV_INT_PORT_SHIFT               = 4,    /* each port occupies 4 bits */

        NV_INT_ALL                      = 0x0f,
        NV_INT_MASK                     = NV_INT_DEV |
                                          NV_INT_ADDED | NV_INT_REMOVED,

        /* INT_CONFIG */
        NV_INT_CONFIG                   = 0x12,
        NV_INT_CONFIG_METHD             = 0x01, // 0 = INT, 1 = SMI

        // For PCI config register 20
        NV_MCP_SATA_CFG_20              = 0x50,
        NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
        NV_MCP_SATA_CFG_20_PORT0_EN     = (1 << 17),
        NV_MCP_SATA_CFG_20_PORT1_EN     = (1 << 16),
        NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
        NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

        NV_ADMA_MAX_CPBS                = 32,
        NV_ADMA_CPB_SZ                  = 128,
        NV_ADMA_APRD_SZ                 = 16,
        NV_ADMA_SGTBL_LEN               = (1024 - NV_ADMA_CPB_SZ) /
                                           NV_ADMA_APRD_SZ,
        NV_ADMA_SGTBL_TOTAL_LEN         = NV_ADMA_SGTBL_LEN + 5,
        NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
        NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
                                           (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
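
        /* Layout note: the per-port DMA area allocated in
           nv_adma_port_start() holds NV_ADMA_MAX_CPBS 128-byte CPBs
           followed by one NV_ADMA_SGTBL_SZ scatter/gather table per CPB,
           which is exactly what NV_ADMA_PORT_PRIV_DMA_SZ above adds up to. */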

        /* BAR5 offset to ADMA general registers */
        NV_ADMA_GEN                     = 0x400,
        NV_ADMA_GEN_CTL                 = 0x00,
        NV_ADMA_NOTIFIER_CLEAR          = 0x30,

        /* BAR5 offset to ADMA ports */
        NV_ADMA_PORT                    = 0x480,

        /* size of ADMA port register space */
        NV_ADMA_PORT_SIZE               = 0x100,

        /* ADMA port registers */
        NV_ADMA_CTL                     = 0x40,
        NV_ADMA_CPB_COUNT               = 0x42,
        NV_ADMA_NEXT_CPB_IDX            = 0x43,
        NV_ADMA_STAT                    = 0x44,
        NV_ADMA_CPB_BASE_LOW            = 0x48,
        NV_ADMA_CPB_BASE_HIGH           = 0x4C,
        NV_ADMA_APPEND                  = 0x50,
        NV_ADMA_NOTIFIER                = 0x68,
        NV_ADMA_NOTIFIER_ERROR          = 0x6C,

        /* NV_ADMA_CTL register bits */
        NV_ADMA_CTL_HOTPLUG_IEN         = (1 << 0),
        NV_ADMA_CTL_CHANNEL_RESET       = (1 << 5),
        NV_ADMA_CTL_GO                  = (1 << 7),
        NV_ADMA_CTL_AIEN                = (1 << 8),
        NV_ADMA_CTL_READ_NON_COHERENT   = (1 << 11),
        NV_ADMA_CTL_WRITE_NON_COHERENT  = (1 << 12),

        /* CPB response flag bits */
        NV_CPB_RESP_DONE                = (1 << 0),
        NV_CPB_RESP_ATA_ERR             = (1 << 3),
        NV_CPB_RESP_CMD_ERR             = (1 << 4),
        NV_CPB_RESP_CPB_ERR             = (1 << 7),

        /* CPB control flag bits */
        NV_CPB_CTL_CPB_VALID            = (1 << 0),
        NV_CPB_CTL_QUEUE                = (1 << 1),
        NV_CPB_CTL_APRD_VALID           = (1 << 2),
        NV_CPB_CTL_IEN                  = (1 << 3),
        NV_CPB_CTL_FPDMA                = (1 << 4),

        /* APRD flags */
        NV_APRD_WRITE                   = (1 << 1),
        NV_APRD_END                     = (1 << 2),
        NV_APRD_CONT                    = (1 << 3),

        /* NV_ADMA_STAT flags */
        NV_ADMA_STAT_TIMEOUT            = (1 << 0),
        NV_ADMA_STAT_HOTUNPLUG          = (1 << 1),
        NV_ADMA_STAT_HOTPLUG            = (1 << 2),
        NV_ADMA_STAT_CPBERR             = (1 << 4),
        NV_ADMA_STAT_SERROR             = (1 << 5),
        NV_ADMA_STAT_CMD_COMPLETE       = (1 << 6),
        NV_ADMA_STAT_IDLE               = (1 << 8),
        NV_ADMA_STAT_LEGACY             = (1 << 9),
        NV_ADMA_STAT_STOPPED            = (1 << 10),
        NV_ADMA_STAT_DONE               = (1 << 12),
        NV_ADMA_STAT_ERR                = NV_ADMA_STAT_CPBERR |
                                          NV_ADMA_STAT_TIMEOUT,

        /* port flags */
        NV_ADMA_PORT_REGISTER_MODE      = (1 << 0),
        NV_ADMA_ATAPI_SETUP_COMPLETE    = (1 << 1),

        /* MCP55 reg offset */
        NV_CTL_MCP55                    = 0x400,
        NV_INT_STATUS_MCP55             = 0x440,
        NV_INT_ENABLE_MCP55             = 0x444,
        NV_NCQ_REG_MCP55                = 0x448,

        /* MCP55 */
        NV_INT_ALL_MCP55                = 0xffff,
        NV_INT_PORT_SHIFT_MCP55         = 16,   /* each port occupies 16 bits */
        NV_INT_MASK_MCP55               = NV_INT_ALL_MCP55 & 0xfffd,

        /* SWNCQ ENABLE BITS */
        NV_CTL_PRI_SWNCQ                = 0x02,
        NV_CTL_SEC_SWNCQ                = 0x04,

        /* SW NCQ status bits */
        NV_SWNCQ_IRQ_DEV                = (1 << 0),
        NV_SWNCQ_IRQ_PM                 = (1 << 1),
        NV_SWNCQ_IRQ_ADDED              = (1 << 2),
        NV_SWNCQ_IRQ_REMOVED            = (1 << 3),

        NV_SWNCQ_IRQ_BACKOUT            = (1 << 4),
        NV_SWNCQ_IRQ_SDBFIS             = (1 << 5),
        NV_SWNCQ_IRQ_DHREGFIS           = (1 << 6),
        NV_SWNCQ_IRQ_DMASETUP           = (1 << 7),

        NV_SWNCQ_IRQ_HOTPLUG            = NV_SWNCQ_IRQ_ADDED |
                                          NV_SWNCQ_IRQ_REMOVED,

};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
        __le64                  addr;
        __le32                  len;
        u8                      flags;
        u8                      packet_len;
        __le16                  reserved;
};

enum nv_adma_regbits {
        CMDEND  = (1 << 15),            /* end of command list */
        WNB     = (1 << 14),            /* wait-not-BSY */
        IGN     = (1 << 13),            /* ignore this entry */
        CS1n    = (1 << (4 + 8)),       /* std. PATA signals follow... */
        DA2     = (1 << (2 + 8)),
        DA1     = (1 << (1 + 8)),
        DA0     = (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
        u8                      resp_flags;    /* 0 */
        u8                      reserved1;     /* 1 */
        u8                      ctl_flags;     /* 2 */
        /* len is length of taskfile in 64 bit words */
        u8                      len;           /* 3 */
        u8                      tag;           /* 4 */
        u8                      next_cpb_idx;  /* 5 */
        __le16                  reserved2;     /* 6-7 */
        __le16                  tf[12];        /* 8-31 */
        struct nv_adma_prd      aprd[5];       /* 32-111 */
        __le64                  next_aprd;     /* 112-119 */
        __le64                  reserved3;     /* 120-127 */
};


struct nv_adma_port_priv {
        struct nv_adma_cpb      *cpb;
        dma_addr_t              cpb_dma;
        struct nv_adma_prd      *aprd;
        dma_addr_t              aprd_dma;
        void __iomem            *ctl_block;
        void __iomem            *gen_block;
        void __iomem            *notifier_clear_block;
        u8                      flags;
        int                     last_issue_ncq;
};

struct nv_host_priv {
        unsigned long           type;
};

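/* Simple FIFO of deferred commands for the SWNCQ path: queued tag numbers
   wait in tag[] between head and tail, and defer_bits apparently mirrors
   which tags are currently parked here. */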
struct defer_queue {
        u32             defer_bits;
        unsigned int    head;
        unsigned int    tail;
        unsigned int    tag[ATA_MAX_QUEUE];
};

enum ncq_saw_flag_list {
        ncq_saw_d2h     = (1U << 0),
        ncq_saw_dmas    = (1U << 1),
        ncq_saw_sdb     = (1U << 2),
        ncq_saw_backout = (1U << 3),
};

struct nv_swncq_port_priv {
        struct ata_prd  *prd;    /* our SG list */
        dma_addr_t      prd_dma; /* and its DMA mapping */
        void __iomem    *sactive_block;
        void __iomem    *irq_block;
        void __iomem    *tag_block;
        u32             qc_active;

        unsigned int    last_issue_tag;

        /* FIFO circular queue of deferred commands */
        struct defer_queue defer_queue;

        /* for NCQ interrupt analysis */
        u32             dhfis_bits;
        u32             dmafis_bits;
        u32             sdbfis_bits;

        unsigned int    ncq_flags;
};


#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
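/* The ADMA general control register carries one interrupt-pending bit per
   port: bit 19 for port 0 and bit 31 (19 + 12) for port 1, which is what
   the shift arithmetic above picks out. */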

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
        GENERIC,
        NFORCE2,
        NFORCE3 = NFORCE2,      /* NF2 == NF3 as far as sata_nv is concerned */
        CK804,
        ADMA,
        SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

        { } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = nv_pci_tbl,
        .probe                  = nv_init_one,
#ifdef CONFIG_PM
        .suspend                = ata_pci_device_suspend,
        .resume                 = nv_pci_device_resume,
#endif
        .remove                 = ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = ATA_DEF_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = ata_scsi_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct scsi_host_template nv_adma_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .change_queue_depth     = ata_scsi_change_queue_depth,
        .can_queue              = NV_ADMA_MAX_CPBS,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = NV_ADMA_SGTBL_TOTAL_LEN,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = NV_ADMA_DMA_BOUNDARY,
        .slave_configure        = nv_adma_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct scsi_host_template nv_swncq_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .change_queue_depth     = ata_scsi_change_queue_depth,
        .can_queue              = ATA_MAX_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = nv_swncq_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static const struct ata_port_operations nv_generic_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = ata_bmdma_freeze,
        .thaw                   = ata_bmdma_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = nv_nf2_freeze,
        .thaw                   = nv_nf2_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = nv_ck804_freeze,
        .thaw                   = nv_ck804_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
        .host_stop              = nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = nv_adma_tf_read,
        .check_atapi_dma        = nv_adma_check_atapi_dma,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_adma_qc_prep,
        .qc_issue               = nv_adma_qc_issue,
        .freeze                 = nv_adma_freeze,
        .thaw                   = nv_adma_thaw,
        .error_handler          = nv_adma_error_handler,
        .post_internal_cmd      = nv_adma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = nv_adma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = nv_adma_port_start,
        .port_stop              = nv_adma_port_stop,
#ifdef CONFIG_PM
        .port_suspend           = nv_adma_port_suspend,
        .port_resume            = nv_adma_port_resume,
#endif
        .host_stop              = nv_adma_host_stop,
};

static const struct ata_port_operations nv_swncq_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_swncq_qc_prep,
        .qc_issue               = nv_swncq_qc_issue,
        .freeze                 = nv_mcp55_freeze,
        .thaw                   = nv_mcp55_thaw,
        .error_handler          = nv_swncq_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
#ifdef CONFIG_PM
        .port_suspend           = nv_swncq_port_suspend,
        .port_resume            = nv_swncq_port_resume,
#endif
        .port_start             = nv_swncq_port_start,
};

static const struct ata_port_info nv_port_info[] = {
        /* generic */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_generic_ops,
                .irq_handler    = nv_generic_interrupt,
        },
        /* nforce2/3 */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_nf2_ops,
                .irq_handler    = nv_nf2_interrupt,
        },
        /* ck804 */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_ck804_ops,
                .irq_handler    = nv_ck804_interrupt,
        },
        /* ADMA */
        {
                .sht            = &nv_adma_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_adma_ops,
                .irq_handler    = nv_adma_interrupt,
        },
        /* SWNCQ */
        {
                .sht            = &nv_swncq_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_NCQ,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_swncq_ops,
                .irq_handler    = nv_swncq_interrupt,
        },
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

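/* driver-wide feature switches: ADMA is enabled by default, software NCQ
   has to be opted into */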
static int adma_enabled = 1;
static int swncq_enabled;

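/* Put the port back into legacy register mode: wait (up to 20 x 50ns) for
   the ADMA engine to report IDLE, clear the GO bit, then wait for the
   LEGACY status bit before recording the mode change in pp->flags. */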
static void nv_adma_register_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                return;

        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA IDLE, stat=0x%hx\n",
                        status);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        count = 0;
        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                         "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
                         status);

        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

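/* Switch the port into ADMA mode: set the GO bit and wait for LEGACY to
   clear and IDLE to assert.  Not legal while the port is configured for
   ATAPI, since ATAPI traffic always uses the legacy interface (hence the
   WARN_ON below). */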
static void nv_adma_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
                return;

        WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        status = readw(mmio + NV_ADMA_STAT);
        while (((status & NV_ADMA_STAT_LEGACY) ||
              !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
                        status);

        pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

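/* Per-device configuration hook.  An ATAPI device forces the port back to
   the legacy interface, which also means a 32-bit bounce limit, the legacy
   DMA boundary and the smaller legacy SG table; ATA devices keep the
   roomier ADMA limits declared in nv_adma_sht.  The ADMA enable bits in
   PCI config register CFG_20 are updated to match. */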
static int nv_adma_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        u64 bounce_limit;
        unsigned long segment_boundary;
        unsigned short sg_tablesize;
        int rc;
        int adma_enable;
        u32 current_reg, new_reg, config_mask;

        rc = ata_scsi_slave_config(sdev);

        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
                /*
                 * NVIDIA reports that ADMA mode does not support ATAPI commands.
                 * Therefore ATAPI commands are sent through the legacy interface.
                 * However, the legacy interface only supports 32-bit DMA.
                 * Restrict DMA parameters as required by the legacy interface
                 * when an ATAPI device is connected.
                 */
                bounce_limit = ATA_DMA_MASK;
                segment_boundary = ATA_DMA_BOUNDARY;
                /* Subtract 1 since an extra entry may be needed for padding, see
                   libata-scsi.c */
                sg_tablesize = LIBATA_MAX_PRD - 1;

                /* Since the legacy DMA engine is in use, we need to disable ADMA
                   on the port. */
                adma_enable = 0;
                nv_adma_register_mode(ap);
        } else {
                bounce_limit = *ap->dev->dma_mask;
                segment_boundary = NV_ADMA_DMA_BOUNDARY;
                sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
                adma_enable = 1;
        }

        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

        if (ap->port_no == 1)
                config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
                              NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
        else
                config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
                              NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

        if (adma_enable) {
                new_reg = current_reg | config_mask;
                pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
        } else {
                new_reg = current_reg & ~config_mask;
                pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
        }

        if (current_reg != new_reg)
                pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

        blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
        blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
        ata_port_printk(ap, KERN_INFO,
                "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
                (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
        return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
        /* Since commands where a result TF is requested are not
           executed in ADMA mode, the only time this function will be called
           in ADMA mode will be if a command fails. In this case we
           don't care about going into register mode with ADMA commands
           pending, as the commands will all shortly be aborted anyway. */
        nv_adma_register_mode(ap);

        ata_tf_read(ap, tf);
}

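/* Build the CPB taskfile area from an ATA taskfile.  Each __le16 entry
   encodes (shadow register index << 8) | register value, optionally OR'd
   with control bits from enum nv_adma_regbits: WNB on the first entry so
   the engine waits for not-BSY, CMDEND on the command register entry to
   close the list, and IGN padding on the unused tail.  A READ DMA EXT
   opcode (0x25), for instance, goes out as
   (ATA_REG_CMD << 8) | 0x25 | CMDEND. */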
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
        unsigned int idx = 0;

        if (tf->flags & ATA_TFLAG_ISADDR) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
                        cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
                } else
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);

                cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
        }

        if (tf->flags & ATA_TFLAG_DEVICE)
                cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

        cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);

        while (idx < 12)
                cpb[idx++] = cpu_to_le16(IGN);

        return idx;
}

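/* Inspect one CPB's response flags after a notifier fired.  On any error
   (or when forced by the caller) the whole port is aborted or frozen and 1
   is returned so the caller stops scanning; on normal completion the qc
   for this tag is completed and 0 is returned. */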
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        u8 flags = pp->cpb[cpb_num].resp_flags;

        VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

        if (unlikely((force_err ||
                     flags & (NV_CPB_RESP_ATA_ERR |
                              NV_CPB_RESP_CMD_ERR |
                              NV_CPB_RESP_CPB_ERR)))) {
                struct ata_eh_info *ehi = &ap->link.eh_info;
                int freeze = 0;

                ata_ehi_clear_desc(ehi);
                __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
                if (flags & NV_CPB_RESP_ATA_ERR) {
                        ata_ehi_push_desc(ehi, "ATA error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CMD_ERR) {
                        ata_ehi_push_desc(ehi, "CMD error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CPB_ERR) {
                        ata_ehi_push_desc(ehi, "CPB error");
                        ehi->err_mask |= AC_ERR_SYSTEM;
                        freeze = 1;
                } else {
                        /* notifier error, but no error in CPB flags? */
                        ata_ehi_push_desc(ehi, "unknown");
                        ehi->err_mask |= AC_ERR_OTHER;
                        freeze = 1;
                }
                /* Kill all commands. EH will determine what actually failed. */
                if (freeze)
                        ata_port_freeze(ap);
                else
                        ata_port_abort(ap);
                return 1;
        }

        if (likely(flags & NV_CPB_RESP_DONE)) {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
                VPRINTK("CPB flags done, flags=0x%x\n", flags);
                if (likely(qc)) {
                        DPRINTK("Completing qc from tag %d\n", cpb_num);
                        ata_qc_complete(qc);
                } else {
                        struct ata_eh_info *ehi = &ap->link.eh_info;
                        /* Notifier bits set without a command may indicate the drive
                           is misbehaving. Raise host state machine violation on this
                           condition. */
                        ata_port_printk(ap, KERN_ERR,
                                        "notifier for tag %d with no cmd?\n",
                                        cpb_num);
                        ehi->err_mask |= AC_ERR_HSM;
                        ehi->action |= ATA_EH_SOFTRESET;
                        ata_port_freeze(ap);
                        return 1;
                }
        }
        return 0;
}

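/* Legacy-path interrupt service for one port: freeze on hotplug/unplug
   notifications, bail out if the device interrupt bit is not ours, ack
   spurious interrupts with no active command by reading the status
   register, and otherwise hand the active qc to ata_host_intr().
   Returns nonzero if the interrupt was consumed. */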
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

        /* freeze if hotplugged */
        if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
                ata_port_freeze(ap);
                return 1;
        }

        /* bail out if not our interrupt */
        if (!(irq_stat & NV_INT_DEV))
                return 0;

        /* DEV interrupt w/ no active qc? */
        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                ata_check_status(ap);
                return 1;
        }

        /* handle interrupt */
        return ata_host_intr(ap, qc);
}

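/* Interrupt handler shared by both ports.  Depending on port state this is
   (a) pure legacy handling when ATAPI setup has disabled ADMA, (b) legacy
   handling first while in register mode, or (c) full ADMA status/notifier
   processing.  The notifier clear registers of both ports are written
   together at the end, per the NVIDIA note below. */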
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        int i, handled = 0;
        u32 notifier_clears[2];

        spin_lock(&host->lock);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                notifier_clears[i] = 0;

                if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
                        struct nv_adma_port_priv *pp = ap->private_data;
                        void __iomem *mmio = pp->ctl_block;
                        u16 status;
                        u32 gen_ctl;
                        u32 notifier, notifier_error;

                        /* if ADMA is disabled, use standard ata interrupt handler */
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                handled += nv_host_intr(ap, irq_stat);
                                continue;
                        }

                        /* if in ATA register mode, check for standard interrupts */
                        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                if (ata_tag_valid(ap->link.active_tag))
                                        /* NV_INT_DEV indication seems unreliable at times,
                                           at least in ADMA mode. Force it on whenever a
                                           command is active, to prevent losing interrupts. */
                                        irq_stat |= NV_INT_DEV;
                                handled += nv_host_intr(ap, irq_stat);
                        }

                        notifier = readl(mmio + NV_ADMA_NOTIFIER);
                        notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                        notifier_clears[i] = notifier | notifier_error;

                        gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

                        if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
                            !notifier_error)
                                /* Nothing to do */
                                continue;

                        status = readw(mmio + NV_ADMA_STAT);

                        /* Clear status. Ensure the controller sees the clearing before we start
                           looking at any of the CPB statuses, so that any CPB completions after
                           this point in the handler will raise another interrupt. */
                        writew(status, mmio + NV_ADMA_STAT);
                        readw(mmio + NV_ADMA_STAT); /* flush posted write */
                        rmb();

                        handled++; /* irq handled if we got here */

                        /* freeze if hotplugged or controller error */
                        if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
                                               NV_ADMA_STAT_HOTUNPLUG |
                                               NV_ADMA_STAT_TIMEOUT |
                                               NV_ADMA_STAT_SERROR))) {
                                struct ata_eh_info *ehi = &ap->link.eh_info;

                                ata_ehi_clear_desc(ehi);
                                __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
                                if (status & NV_ADMA_STAT_TIMEOUT) {
                                        ehi->err_mask |= AC_ERR_SYSTEM;
                                        ata_ehi_push_desc(ehi, "timeout");
                                } else if (status & NV_ADMA_STAT_HOTPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hotplug");
                                } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hot unplug");
                                } else if (status & NV_ADMA_STAT_SERROR) {
                                        /* let libata analyze SError and figure out the cause */
                                        ata_ehi_push_desc(ehi, "SError");
                                } else
                                        ata_ehi_push_desc(ehi, "unknown");
                                ata_port_freeze(ap);
                                continue;
                        }

                        if (status & (NV_ADMA_STAT_DONE |
                                      NV_ADMA_STAT_CPBERR)) {
                                u32 check_commands;
                                int pos, error = 0;

                                if (ata_tag_valid(ap->link.active_tag))
                                        check_commands = 1 << ap->link.active_tag;
                                else
                                        check_commands = ap->link.sactive;

                                /* Check CPBs for completed commands */
                                while ((pos = ffs(check_commands)) && !error) {
                                        pos--;
                                        error = nv_adma_check_cpb(ap, pos,
                                                notifier_error & (1 << pos));
                                        check_commands &= ~(1 << pos);
                                }
                        }
                }
        }

        if (notifier_clears[0] || notifier_clears[1]) {
                /* Note: Both notifier clear registers must be written
                   if either is set, even if one is zero, according to NVIDIA. */
                struct nv_adma_port_priv *pp = host->ports[0]->private_data;
                writel(notifier_clears[0], pp->notifier_clear_block);
                pp = host->ports[1]->private_data;
                writel(notifier_clears[1], pp->notifier_clear_block);
        }

        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_freeze(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* Disable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_thaw(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* Enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u32 notifier_clears[2];

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                ata_bmdma_irq_clear(ap);
                return;
        }

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* clear ADMA status */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* clear notifiers - note both ports need to be written with
           something even though we are only clearing on one */
        if (ap->port_no == 0) {
                notifier_clears[0] = 0xFFFFFFFF;
                notifier_clears[1] = 0;
        } else {
                notifier_clears[0] = 0;
                notifier_clears[1] = 0xFFFFFFFF;
        }
        pp = ap->host->ports[0]->private_data;
        writel(notifier_clears[0], pp->notifier_clear_block);
        pp = ap->host->ports[1]->private_data;
        writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct nv_adma_port_priv *pp;
        int rc;
        void *mem;
        dma_addr_t mem_dma;
        void __iomem *mmio;
        u16 tmp;

        VPRINTK("ENTER\n");

        rc = ata_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
               ap->port_no * NV_ADMA_PORT_SIZE;
        pp->ctl_block = mmio;
        pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
        pp->notifier_clear_block = pp->gen_block +
               NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

        mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
                                  &mem_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

        /*
         * First item in chunk of DMA memory:
         * 128-byte command parameter block (CPB)
         * one for each command tag
         */
        pp->cpb     = mem;
        pp->cpb_dma = mem_dma;

        writel(mem_dma & 0xFFFFFFFF,    mmio + NV_ADMA_CPB_BASE_LOW);
        writel((mem_dma >> 16) >> 16,   mmio + NV_ADMA_CPB_BASE_HIGH);
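        /* (the split ">> 16 >> 16" extracts the upper 32 bits without
           invoking an out-of-range ">> 32" when dma_addr_t is only
           32 bits wide) */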

        mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
        mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

        /*
         * Second item: block of ADMA_SGTBL_LEN s/g entries
         */
        pp->aprd = mem;
        pp->aprd_dma = mem_dma;

        ap->private_data = pp;

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags = NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        VPRINTK("ENTER\n");
        writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        /* Go to register mode - clears GO */
        nv_adma_register_mode(ap);

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* disable interrupt, shut down port */
        writew(0, mmio + NV_ADMA_CTL);

        return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        /* set CPB block location */
        writel(pp->cpb_dma & 0xFFFFFFFF,        mmio + NV_ADMA_CPB_BASE_LOW);
        writel((pp->cpb_dma >> 16) >> 16,       mmio + NV_ADMA_CPB_BASE_HIGH);

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}
#endif

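/* In ADMA mode the legacy taskfile registers are exposed through BAR5,
   one 32-bit slot per register, which is why every ioaddr entry below
   sits at (register index * 4) from the port's control block. */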
1268 static void nv_adma_setup_port(struct ata_port *ap)
1269 {
1270         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1271         struct ata_ioports *ioport = &ap->ioaddr;
1272
1273         VPRINTK("ENTER\n");
1274
1275         mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1276
1277         ioport->cmd_addr        = mmio;
1278         ioport->data_addr       = mmio + (ATA_REG_DATA * 4);
1279         ioport->error_addr      =
1280         ioport->feature_addr    = mmio + (ATA_REG_ERR * 4);
1281         ioport->nsect_addr      = mmio + (ATA_REG_NSECT * 4);
1282         ioport->lbal_addr       = mmio + (ATA_REG_LBAL * 4);
1283         ioport->lbam_addr       = mmio + (ATA_REG_LBAM * 4);
1284         ioport->lbah_addr       = mmio + (ATA_REG_LBAH * 4);
1285         ioport->device_addr     = mmio + (ATA_REG_DEVICE * 4);
1286         ioport->status_addr     =
1287         ioport->command_addr    = mmio + (ATA_REG_STATUS * 4);
1288         ioport->altstatus_addr  =
1289         ioport->ctl_addr        = mmio + 0x20;
1290 }
1291
1292 static int nv_adma_host_init(struct ata_host *host)
1293 {
1294         struct pci_dev *pdev = to_pci_dev(host->dev);
1295         unsigned int i;
1296         u32 tmp32;
1297
1298         VPRINTK("ENTER\n");
1299
1300         /* enable ADMA on the ports */
1301         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1302         tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1303                  NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1304                  NV_MCP_SATA_CFG_20_PORT1_EN |
1305                  NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1306
1307         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1308
1309         for (i = 0; i < host->n_ports; i++)
1310                 nv_adma_setup_port(host->ports[i]);
1311
1312         return 0;
1313 }
1314
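/* Each CPB embeds five APRD entries inline; scatterlist elements past the
 * fifth spill into a per-tag external APRD table (see nv_adma_fill_sg).
 * That is why entry 4 below never gets NV_APRD_CONT: presumably the
 * controller continues from the external table via cpb->next_aprd instead.
 */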
1315 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1316                               struct scatterlist *sg,
1317                               int idx,
1318                               struct nv_adma_prd *aprd)
1319 {
1320         u8 flags = 0;
1321         if (qc->tf.flags & ATA_TFLAG_WRITE)
1322                 flags |= NV_APRD_WRITE;
1323         if (idx == qc->n_elem - 1)
1324                 flags |= NV_APRD_END;
1325         else if (idx != 4)
1326                 flags |= NV_APRD_CONT;
1327
1328         aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1329         aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1330         aprd->flags = flags;
1331         aprd->packet_len = 0;
1332 }
1333
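/* Sketch of the layout this builds, assuming a 7-element scatterlist on
 * tag t: entries 0..4 land in cpb->aprd[0..4], entries 5..6 land in
 * pp->aprd[NV_ADMA_SGTBL_LEN * t + 0..1], and cpb->next_aprd points at
 * that external table.  With five or fewer entries, next_aprd stays 0.
 */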
1334 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1335 {
1336         struct nv_adma_port_priv *pp = qc->ap->private_data;
1337         unsigned int idx;
1338         struct nv_adma_prd *aprd;
1339         struct scatterlist *sg;
1340
1341         VPRINTK("ENTER\n");
1342
1343         idx = 0;
1344
1345         ata_for_each_sg(sg, qc) {
1346                 aprd = (idx < 5) ? &cpb->aprd[idx] :
1347                                &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
1348                 nv_adma_fill_aprd(qc, sg, idx, aprd);
1349                 idx++;
1350         }
1351         if (idx > 5)
1352                 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1353         else
1354                 cpb->next_aprd = cpu_to_le64(0);
1355 }
1356
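/* Decision summary for the checks below: fall back to legacy register mode
 * (return 1) when ATAPI setup is active on the port, the command is
 * polled, or a result taskfile is needed; use the ADMA engine (return 0)
 * only for DMA-mapped or interrupt-driven no-data commands.
 */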
1357 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1358 {
1359         struct nv_adma_port_priv *pp = qc->ap->private_data;
1360
1361         /* ADMA engine can only be used for non-ATAPI DMA commands,
1362            or interrupt-driven no-data commands, where a result taskfile
1363            is not required. */
1364         if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1365            (qc->tf.flags & ATA_TFLAG_POLLING) ||
1366            (qc->flags & ATA_QCFLAG_RESULT_TF))
1367                 return 1;
1368
1369         if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1370            (qc->tf.protocol == ATA_PROT_NODATA))
1371                 return 0;
1372
1373         return 1;
1374 }
1375
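/* CPB update protocol used below: flag the CPB as DONE and invalid first,
 * fill in its fields, then set ctl_flags (with CPB_VALID) and finally
 * clear resp_flags, with write barriers between each step so the
 * controller never fetches a half-built CPB.
 */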
1376 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1377 {
1378         struct nv_adma_port_priv *pp = qc->ap->private_data;
1379         struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1380         u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1381                        NV_CPB_CTL_IEN;
1382
1383         if (nv_adma_use_reg_mode(qc)) {
1384                 nv_adma_register_mode(qc->ap);
1385                 ata_qc_prep(qc);
1386                 return;
1387         }
1388
1389         cpb->resp_flags = NV_CPB_RESP_DONE;
1390         wmb();
1391         cpb->ctl_flags = 0;
1392         wmb();
1393
1394         cpb->len                = 3;
1395         cpb->tag                = qc->tag;
1396         cpb->next_cpb_idx       = 0;
1397
1398         /* turn on NCQ flags for NCQ commands */
1399         if (qc->tf.protocol == ATA_PROT_NCQ)
1400                 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1401
1402         VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1403
1404         nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1405
1406         if (qc->flags & ATA_QCFLAG_DMAMAP) {
1407                 nv_adma_fill_sg(qc, cpb);
1408                 ctl_flags |= NV_CPB_CTL_APRD_VALID;
1409         } else
1410                 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1411
1412         /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1413            until we are finished filling in all of the contents */
1414         wmb();
1415         cpb->ctl_flags = ctl_flags;
1416         wmb();
1417         cpb->resp_flags = 0;
1418 }
1419
1420 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1421 {
1422         struct nv_adma_port_priv *pp = qc->ap->private_data;
1423         void __iomem *mmio = pp->ctl_block;
1424         int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1425
1426         VPRINTK("ENTER\n");
1427
1428         if (nv_adma_use_reg_mode(qc)) {
1429                 /* use ATA register mode */
1430                 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1431                 nv_adma_register_mode(qc->ap);
1432                 return ata_qc_issue_prot(qc);
1433         } else
1434                 nv_adma_mode(qc->ap);
1435
1436         /* write append register, command tag in lower 8 bits
1437            and (number of cpbs to append -1) in top 8 bits */
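        /* e.g. the writew(qc->tag, ...) below appends exactly one CPB,
           since the count field in the top eight bits is zero */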
1438         wmb();
1439
1440         if (curr_ncq != pp->last_issue_ncq) {
1441                 /* Seems to need some delay before switching between NCQ and
1442                    non-NCQ commands, else we get command timeouts and such. */
1443                 udelay(20);
1444                 pp->last_issue_ncq = curr_ncq;
1445         }
1446
1447         writew(qc->tag, mmio + NV_ADMA_APPEND);
1448
1449         DPRINTK("Issued tag %u\n", qc->tag);
1450
1451         return 0;
1452 }
1453
1454 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1455 {
1456         struct ata_host *host = dev_instance;
1457         unsigned int i;
1458         unsigned int handled = 0;
1459         unsigned long flags;
1460
1461         spin_lock_irqsave(&host->lock, flags);
1462
1463         for (i = 0; i < host->n_ports; i++) {
1464                 struct ata_port *ap;
1465
1466                 ap = host->ports[i];
1467                 if (ap &&
1468                     !(ap->flags & ATA_FLAG_DISABLED)) {
1469                         struct ata_queued_cmd *qc;
1470
1471                         qc = ata_qc_from_tag(ap, ap->link.active_tag);
1472                         if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1473                                 handled += ata_host_intr(ap, qc);
1474                         else
1475                                 // No request pending?  Clear interrupt status
1476                                 // anyway, in case there's one pending.
1477                                 ap->ops->check_status(ap);
1478                 }
1479
1480         }
1481
1482         spin_unlock_irqrestore(&host->lock, flags);
1483
1484         return IRQ_RETVAL(handled);
1485 }
1486
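/* Each port's status occupies one 4-bit nibble of irq_stat, so the loop
 * below consumes a nibble per port: port 0 sees bits 0-3, port 1 sees
 * bits 4-7.
 */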
1487 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1488 {
1489         int i, handled = 0;
1490
1491         for (i = 0; i < host->n_ports; i++) {
1492                 struct ata_port *ap = host->ports[i];
1493
1494                 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1495                         handled += nv_host_intr(ap, irq_stat);
1496
1497                 irq_stat >>= NV_INT_PORT_SHIFT;
1498         }
1499
1500         return IRQ_RETVAL(handled);
1501 }
1502
1503 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1504 {
1505         struct ata_host *host = dev_instance;
1506         u8 irq_stat;
1507         irqreturn_t ret;
1508
1509         spin_lock(&host->lock);
1510         irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1511         ret = nv_do_interrupt(host, irq_stat);
1512         spin_unlock(&host->lock);
1513
1514         return ret;
1515 }
1516
1517 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1518 {
1519         struct ata_host *host = dev_instance;
1520         u8 irq_stat;
1521         irqreturn_t ret;
1522
1523         spin_lock(&host->lock);
1524         irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1525         ret = nv_do_interrupt(host, irq_stat);
1526         spin_unlock(&host->lock);
1527
1528         return ret;
1529 }
1530
1531 static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
1532 {
1533         if (sc_reg > SCR_CONTROL)
1534                 return -EINVAL;
1535
1536         *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1537         return 0;
1538 }
1539
1540 static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
1541 {
1542         if (sc_reg > SCR_CONTROL)
1543                 return -EINVAL;
1544
1545         iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
1546         return 0;
1547 }
1548
1549 static void nv_nf2_freeze(struct ata_port *ap)
1550 {
1551         void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1552         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1553         u8 mask;
1554
1555         mask = ioread8(scr_addr + NV_INT_ENABLE);
1556         mask &= ~(NV_INT_ALL << shift);
1557         iowrite8(mask, scr_addr + NV_INT_ENABLE);
1558 }
1559
1560 static void nv_nf2_thaw(struct ata_port *ap)
1561 {
1562         void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1563         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1564         u8 mask;
1565
1566         iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1567
1568         mask = ioread8(scr_addr + NV_INT_ENABLE);
1569         mask |= (NV_INT_MASK << shift);
1570         iowrite8(mask, scr_addr + NV_INT_ENABLE);
1571 }
1572
1573 static void nv_ck804_freeze(struct ata_port *ap)
1574 {
1575         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1576         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1577         u8 mask;
1578
1579         mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1580         mask &= ~(NV_INT_ALL << shift);
1581         writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1582 }
1583
1584 static void nv_ck804_thaw(struct ata_port *ap)
1585 {
1586         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1587         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1588         u8 mask;
1589
1590         writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1591
1592         mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1593         mask |= (NV_INT_MASK << shift);
1594         writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1595 }
1596
1597 static void nv_mcp55_freeze(struct ata_port *ap)
1598 {
1599         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1600         int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1601         u32 mask;
1602
1603         writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1604
1605         mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1606         mask &= ~(NV_INT_ALL_MCP55 << shift);
1607         writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1608         ata_bmdma_freeze(ap);
1609 }
1610
1611 static void nv_mcp55_thaw(struct ata_port *ap)
1612 {
1613         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1614         int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1615         u32 mask;
1616
1617         writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1618
1619         mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1620         mask |= (NV_INT_MASK_MCP55 << shift);
1621         writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1622         ata_bmdma_thaw(ap);
1623 }
1624
1625 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1626                         unsigned long deadline)
1627 {
1628         unsigned int dummy;
1629
1630         /* SATA hardreset fails to retrieve proper device signature on
1631          * some controllers.  Don't classify on hardreset.  For more
1632          * info, see http://bugzilla.kernel.org/show_bug.cgi?id=3352
1633          */
1634         return sata_std_hardreset(link, &dummy, deadline);
1635 }
1636
1637 static void nv_error_handler(struct ata_port *ap)
1638 {
1639         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1640                            nv_hardreset, ata_std_postreset);
1641 }
1642
1643 static void nv_adma_error_handler(struct ata_port *ap)
1644 {
1645         struct nv_adma_port_priv *pp = ap->private_data;
1646         if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1647                 void __iomem *mmio = pp->ctl_block;
1648                 int i;
1649                 u16 tmp;
1650
1651                 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1652                         u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1653                         u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1654                         u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1655                         u32 status = readw(mmio + NV_ADMA_STAT);
1656                         u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1657                         u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1658
1659                         ata_port_printk(ap, KERN_ERR,
1660                                 "EH in ADMA mode, notifier 0x%X "
1661                                 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1662                                 "next cpb count 0x%X next cpb idx 0x%x\n",
1663                                 notifier, notifier_error, gen_ctl, status,
1664                                 cpb_count, next_cpb_idx);
1665
1666                         for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1667                                 struct nv_adma_cpb *cpb = &pp->cpb[i];
1668                                 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1669                                     ap->link.sactive & (1 << i))
1670                                         ata_port_printk(ap, KERN_ERR,
1671                                                 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1672                                                 i, cpb->ctl_flags, cpb->resp_flags);
1673                         }
1674                 }
1675
1676                 /* Push us back into port register mode for error handling. */
1677                 nv_adma_register_mode(ap);
1678
1679                 /* Mark all of the CPBs as invalid to prevent them from
1680                    being executed */
1681                 for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1682                         pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1683
1684                 /* clear CPB fetch count */
1685                 writew(0, mmio + NV_ADMA_CPB_COUNT);
1686
1687                 /* Reset channel */
1688                 tmp = readw(mmio + NV_ADMA_CTL);
1689                 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1690                 readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1691                 udelay(1);
1692                 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1693                 readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1694         }
1695
1696         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1697                            nv_hardreset, ata_std_postreset);
1698 }
1699
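/* The defer queue is a small ring buffer: tags are pushed at tail and
 * popped at head, both taken modulo ATA_MAX_QUEUE via the mask below,
 * while defer_bits mirrors the queue contents as a bitmap for quick
 * membership tests.
 */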
1700 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1701 {
1702         struct nv_swncq_port_priv *pp = ap->private_data;
1703         struct defer_queue *dq = &pp->defer_queue;
1704
1705         /* queue is full */
1706         WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1707         dq->defer_bits |= (1 << qc->tag);
1708         dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1709 }
1710
1711 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1712 {
1713         struct nv_swncq_port_priv *pp = ap->private_data;
1714         struct defer_queue *dq = &pp->defer_queue;
1715         unsigned int tag;
1716
1717         if (dq->head == dq->tail)       /* empty queue */

1718                 return NULL;
1719
1720         tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1721         dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1722         WARN_ON(!(dq->defer_bits & (1 << tag)));
1723         dq->defer_bits &= ~(1 << tag);
1724
1725         return ata_qc_from_tag(ap, tag);
1726 }
1727
1728 static void nv_swncq_fis_reinit(struct ata_port *ap)
1729 {
1730         struct nv_swncq_port_priv *pp = ap->private_data;
1731
1732         pp->dhfis_bits = 0;
1733         pp->dmafis_bits = 0;
1734         pp->sdbfis_bits = 0;
1735         pp->ncq_flags = 0;
1736 }
1737
1738 static void nv_swncq_pp_reinit(struct ata_port *ap)
1739 {
1740         struct nv_swncq_port_priv *pp = ap->private_data;
1741         struct defer_queue *dq = &pp->defer_queue;
1742
1743         dq->head = 0;
1744         dq->tail = 0;
1745         dq->defer_bits = 0;
1746         pp->qc_active = 0;
1747         pp->last_issue_tag = ATA_TAG_POISON;
1748         nv_swncq_fis_reinit(ap);
1749 }
1750
1751 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1752 {
1753         struct nv_swncq_port_priv *pp = ap->private_data;
1754
1755         writew(fis, pp->irq_block);
1756 }
1757
1758 static void __ata_bmdma_stop(struct ata_port *ap)
1759 {
1760         struct ata_queued_cmd qc;
1761
1762         qc.ap = ap;
1763         ata_bmdma_stop(&qc);
1764 }
1765
1766 static void nv_swncq_ncq_stop(struct ata_port *ap)
1767 {
1768         struct nv_swncq_port_priv *pp = ap->private_data;
1769         unsigned int i;
1770         u32 sactive;
1771         u32 done_mask;
1772
1773         ata_port_printk(ap, KERN_ERR,
1774                         "EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
1775                         ap->qc_active, ap->link.sactive);
1776         ata_port_printk(ap, KERN_ERR,
1777                 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1778                 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1779                 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1780                 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1781
1782         ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1783                         ap->ops->check_status(ap),
1784                         ioread8(ap->ioaddr.error_addr));
1785
1786         sactive = readl(pp->sactive_block);
1787         done_mask = pp->qc_active ^ sactive;
1788
1789         ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1790         for (i = 0; i < ATA_MAX_QUEUE; i++) {
1791                 u8 err = 0;
1792                 if (pp->qc_active & (1 << i))
1793                         err = 0;
1794                 else if (done_mask & (1 << i))
1795                         err = 1;
1796                 else
1797                         continue;
1798
1799                 ata_port_printk(ap, KERN_ERR,
1800                                 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1801                                 (pp->dhfis_bits >> i) & 0x1,
1802                                 (pp->dmafis_bits >> i) & 0x1,
1803                                 (pp->sdbfis_bits >> i) & 0x1,
1804                                 (sactive >> i) & 0x1,
1805                                 (err ? "error! tag doesn't exist" : " "));
1806         }
1807
1808         nv_swncq_pp_reinit(ap);
1809         ap->ops->irq_clear(ap);
1810         __ata_bmdma_stop(ap);
1811         nv_swncq_irq_clear(ap, 0xffff);
1812 }
1813
1814 static void nv_swncq_error_handler(struct ata_port *ap)
1815 {
1816         struct ata_eh_context *ehc = &ap->link.eh_context;
1817
1818         if (ap->link.sactive) {
1819                 nv_swncq_ncq_stop(ap);
1820                 ehc->i.action |= ATA_EH_HARDRESET;
1821         }
1822
1823         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1824                            nv_hardreset, ata_std_postreset);
1825 }
1826
1827 #ifdef CONFIG_PM
1828 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1829 {
1830         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1831         u32 tmp;
1832
1833         /* clear irq */
1834         writel(~0, mmio + NV_INT_STATUS_MCP55);
1835
1836         /* disable irq */
1837         writel(0, mmio + NV_INT_ENABLE_MCP55);
1838
1839         /* disable swncq */
1840         tmp = readl(mmio + NV_CTL_MCP55);
1841         tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1842         writel(tmp, mmio + NV_CTL_MCP55);
1843
1844         return 0;
1845 }
1846
1847 static int nv_swncq_port_resume(struct ata_port *ap)
1848 {
1849         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1850         u32 tmp;
1851
1852         /* clear irq */
1853         writel(~0, mmio + NV_INT_STATUS_MCP55);
1854
1855         /* enable irq */
1856         writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1857
1858         /* enable swncq */
1859         tmp = readl(mmio + NV_CTL_MCP55);
1860         writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1861
1862         return 0;
1863 }
1864 #endif
1865
1866 static void nv_swncq_host_init(struct ata_host *host)
1867 {
1868         u32 tmp;
1869         void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1870         struct pci_dev *pdev = to_pci_dev(host->dev);
1871         u8 regval;
1872
1873         /* disable ECO 398 */
1874         pci_read_config_byte(pdev, 0x7f, &regval);
1875         regval &= ~(1 << 7);
1876         pci_write_config_byte(pdev, 0x7f, regval);
1877
1878         /* enable swncq */
1879         tmp = readl(mmio + NV_CTL_MCP55);
1880         VPRINTK("HOST_CTL:0x%X\n", tmp);
1881         writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1882
1883         /* enable irq intr */
1884         tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1885         VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1886         writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1887
1888         /* clear port irq */
1889         writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1890 }
1891
1892 static int nv_swncq_slave_config(struct scsi_device *sdev)
1893 {
1894         struct ata_port *ap = ata_shost_to_port(sdev->host);
1895         struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1896         struct ata_device *dev;
1897         int rc;
1898         u8 rev;
1899         u8 check_maxtor = 0;
1900         unsigned char model_num[ATA_ID_PROD_LEN + 1];
1901
1902         rc = ata_scsi_slave_config(sdev);
1903         if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1904                 /* Not a proper libata device, ignore */
1905                 return rc;
1906
1907         dev = &ap->link.device[sdev->id];
1908         if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1909                 return rc;
1910
1911         /* if MCP51 and Maxtor, then disable ncq */
1912         if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1913                 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1914                 check_maxtor = 1;
1915
1916         /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1917         if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1918                 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1919                 pci_read_config_byte(pdev, 0x8, &rev);
1920                 if (rev <= 0xa2)
1921                         check_maxtor = 1;
1922         }
1923
1924         if (!check_maxtor)
1925                 return rc;
1926
1927         ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1928
1929         if (strncmp(model_num, "Maxtor", 6) == 0) {
1930                 ata_scsi_change_queue_depth(sdev, 1);
1931                 ata_dev_printk(dev, KERN_NOTICE,
1932                         "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1933         }
1934
1935         return rc;
1936 }
1937
1938 static int nv_swncq_port_start(struct ata_port *ap)
1939 {
1940         struct device *dev = ap->host->dev;
1941         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1942         struct nv_swncq_port_priv *pp;
1943         int rc;
1944
1945         rc = ata_port_start(ap);
1946         if (rc)
1947                 return rc;
1948
1949         pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1950         if (!pp)
1951                 return -ENOMEM;
1952
1953         pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1954                                       &pp->prd_dma, GFP_KERNEL);
1955         if (!pp->prd)
1956                 return -ENOMEM;
1957         memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1958
1959         ap->private_data = pp;
1960         pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1961         pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1962         pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1963
1964         return 0;
1965 }
1966
1967 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1968 {
1969         if (qc->tf.protocol != ATA_PROT_NCQ) {
1970                 ata_qc_prep(qc);
1971                 return;
1972         }
1973
1974         if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1975                 return;
1976
1977         nv_swncq_fill_sg(qc);
1978 }
1979
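/* Each PRD entry must not cross a 64KB boundary, so the loop below splits
 * segments at 64KB lines; e.g. (hypothetical values) a 0x3000-byte segment
 * at 0x1f000 becomes one entry of 0x1000 bytes ending at 0x20000 and a
 * second of 0x2000 bytes.  A length field of 0 apparently encodes the full
 * 0x10000 bytes, which is why len is masked with 0xffff.
 */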
1980 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1981 {
1982         struct ata_port *ap = qc->ap;
1983         struct scatterlist *sg;
1984         unsigned int idx;
1985         struct nv_swncq_port_priv *pp = ap->private_data;
1986         struct ata_prd *prd;
1987
1988         WARN_ON(qc->__sg == NULL);
1989         WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1990
1991         prd = pp->prd + ATA_MAX_PRD * qc->tag;
1992
1993         idx = 0;
1994         ata_for_each_sg(sg, qc) {
1995                 u32 addr, offset;
1996                 u32 sg_len, len;
1997
1998                 addr = (u32)sg_dma_address(sg);
1999                 sg_len = sg_dma_len(sg);
2000
2001                 while (sg_len) {
2002                         offset = addr & 0xffff;
2003                         len = sg_len;
2004                         if ((offset + sg_len) > 0x10000)
2005                                 len = 0x10000 - offset;
2006
2007                         prd[idx].addr = cpu_to_le32(addr);
2008                         prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2009
2010                         idx++;
2011                         sg_len -= len;
2012                         addr += len;
2013                 }
2014         }
2015
2016         if (idx)
2017                 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2018 }
2019
2020 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2021                                           struct ata_queued_cmd *qc)
2022 {
2023         struct nv_swncq_port_priv *pp = ap->private_data;
2024
2025         if (qc == NULL)
2026                 return 0;
2027
2028         DPRINTK("Enter\n");
2029
2030         writel((1 << qc->tag), pp->sactive_block);
2031         pp->last_issue_tag = qc->tag;
2032         pp->dhfis_bits &= ~(1 << qc->tag);
2033         pp->dmafis_bits &= ~(1 << qc->tag);
2034         pp->qc_active |= (0x1 << qc->tag);
2035
2036         ap->ops->tf_load(ap, &qc->tf);   /* load tf registers */
2037         ap->ops->exec_command(ap, &qc->tf);
2038
2039         DPRINTK("Issued tag %u\n", qc->tag);
2040
2041         return 0;
2042 }
2043
2044 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2045 {
2046         struct ata_port *ap = qc->ap;
2047         struct nv_swncq_port_priv *pp = ap->private_data;
2048
2049         if (qc->tf.protocol != ATA_PROT_NCQ)
2050                 return ata_qc_issue_prot(qc);
2051
2052         DPRINTK("Enter\n");
2053
2054         if (!pp->qc_active)
2055                 nv_swncq_issue_atacmd(ap, qc);
2056         else
2057                 nv_swncq_qc_to_dq(ap, qc);      /* add qc to defer queue */
2058
2059         return 0;
2060 }
2061
2062 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2063 {
2064         u32 serror;
2065         struct ata_eh_info *ehi = &ap->link.eh_info;
2066
2067         ata_ehi_clear_desc(ehi);
2068
2069         /* SError needs clearing; otherwise, the controller might lock up */
2070         sata_scr_read(&ap->link, SCR_ERROR, &serror);
2071         sata_scr_write(&ap->link, SCR_ERROR, serror);
2072
2073         /* analyze @fis */
2074         if (fis & NV_SWNCQ_IRQ_ADDED)
2075                 ata_ehi_push_desc(ehi, "hot plug");
2076         else if (fis & NV_SWNCQ_IRQ_REMOVED)
2077                 ata_ehi_push_desc(ehi, "hot unplug");
2078
2079         ata_ehi_hotplugged(ehi);
2080
2081         /* okay, let's hand over to EH */
2082         ehi->serror |= serror;
2083
2084         ata_port_freeze(ap);
2085 }
2086
2087 static int nv_swncq_sdbfis(struct ata_port *ap)
2088 {
2089         struct ata_queued_cmd *qc;
2090         struct nv_swncq_port_priv *pp = ap->private_data;
2091         struct ata_eh_info *ehi = &ap->link.eh_info;
2092         u32 sactive;
2093         int nr_done = 0;
2094         u32 done_mask;
2095         int i;
2096         u8 host_stat;
2097         u8 lack_dhfis = 0;
2098
2099         host_stat = ap->ops->bmdma_status(ap);
2100         if (unlikely(host_stat & ATA_DMA_ERR)) {
2101                 /* error when transferring data to/from memory */
2102                 ata_ehi_clear_desc(ehi);
2103                 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2104                 ehi->err_mask |= AC_ERR_HOST_BUS;
2105                 ehi->action |= ATA_EH_SOFTRESET;
2106                 return -EINVAL;
2107         }
2108
2109         ap->ops->irq_clear(ap);
2110         __ata_bmdma_stop(ap);
2111
2112         sactive = readl(pp->sactive_block);
2113         done_mask = pp->qc_active ^ sactive;
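        /* done_mask flags tags that were active but have since left SActive,
           i.e. completed; e.g. qc_active 0x06 with sactive 0x02 yields
           done_mask 0x04, meaning tag 2 finished.  A bit set in done_mask
           and sactive at once would be a tag the device reports active but
           we never issued, caught as an illegal transition below. */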
2114
2115         if (unlikely(done_mask & sactive)) {
2116                 ata_ehi_clear_desc(ehi);
2117                 ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
2118                                   "(%08x->%08x)", pp->qc_active, sactive);
2119                 ehi->err_mask |= AC_ERR_HSM;
2120                 ehi->action |= ATA_EH_HARDRESET;
2121                 return -EINVAL;
2122         }
2123         for (i = 0; i < ATA_MAX_QUEUE; i++) {
2124                 if (!(done_mask & (1 << i)))
2125                         continue;
2126
2127                 qc = ata_qc_from_tag(ap, i);
2128                 if (qc) {
2129                         ata_qc_complete(qc);
2130                         pp->qc_active &= ~(1 << i);
2131                         pp->dhfis_bits &= ~(1 << i);
2132                         pp->dmafis_bits &= ~(1 << i);
2133                         pp->sdbfis_bits |= (1 << i);
2134                         nr_done++;
2135                 }
2136         }
2137
2138         if (!ap->qc_active) {
2139                 DPRINTK("over\n");
2140                 nv_swncq_pp_reinit(ap);
2141                 return nr_done;
2142         }
2143
2144         if (pp->qc_active & pp->dhfis_bits)
2145                 return nr_done;
2146
2147         if ((pp->ncq_flags & ncq_saw_backout) ||
2148             (pp->qc_active ^ pp->dhfis_bits))
2149                 /* if the controller can't get a Device-to-Host Register FIS,
2150                  * the driver needs to reissue the command.
2151                  */
2152                 lack_dhfis = 1;
2153
2154         DPRINTK("id 0x%x QC: qc_active 0x%x, "
2155                 "SWNCQ:qc_active 0x%X defer_bits %X "
2156                 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2157                 ap->print_id, ap->qc_active, pp->qc_active,
2158                 pp->defer_queue.defer_bits, pp->dhfis_bits,
2159                 pp->dmafis_bits, pp->last_issue_tag);
2160
2161         nv_swncq_fis_reinit(ap);
2162
2163         if (lack_dhfis) {
2164                 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2165                 nv_swncq_issue_atacmd(ap, qc);
2166                 return nr_done;
2167         }
2168
2169         if (pp->defer_queue.defer_bits) {
2170                 /* send deferral queue command */
2171                 qc = nv_swncq_qc_from_dq(ap);
2172                 WARN_ON(qc == NULL);
2173                 nv_swncq_issue_atacmd(ap, qc);
2174         }
2175
2176         return nr_done;
2177 }
2178
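/* The active tag apparently sits in bits 2..6 of the tag register, hence
 * the shift and 0x1f mask below; no hardware documentation is available
 * to confirm what the remaining bits hold.
 */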
2179 static inline u32 nv_swncq_tag(struct ata_port *ap)
2180 {
2181         struct nv_swncq_port_priv *pp = ap->private_data;
2182         u32 tag;
2183
2184         tag = readb(pp->tag_block) >> 2;
2185         return (tag & 0x1f);
2186 }
2187
2188 static int nv_swncq_dmafis(struct ata_port *ap)
2189 {
2190         struct ata_queued_cmd *qc;
2191         unsigned int rw;
2192         u8 dmactl;
2193         u32 tag;
2194         struct nv_swncq_port_priv *pp = ap->private_data;
2195
2196         __ata_bmdma_stop(ap);
2197         tag = nv_swncq_tag(ap);
2198
2199         DPRINTK("dma setup tag 0x%x\n", tag);
2200         qc = ata_qc_from_tag(ap, tag);
2201
2202         if (unlikely(!qc))
2203                 return 0;
2204
2205         rw = qc->tf.flags & ATA_TFLAG_WRITE;
2206
2207         /* load PRD table addr. */
2208         iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2209                   ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2210
2211         /* specify data direction, triple-check start bit is clear */
2212         dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2213         dmactl &= ~ATA_DMA_WR;
2214         if (!rw)
2215                 dmactl |= ATA_DMA_WR;
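        /* note: ATA_DMA_WR means "write to memory", so it is set for
           device-to-host (read) transfers */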
2216
2217         iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2218
2219         return 1;
2220 }
2221
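/* Rough SWNCQ flow handled below, pieced together from the code since no
 * hardware docs exist: after a command is issued, a D2H Register FIS
 * (DHREGFIS) acknowledges it, a DMA Setup FIS (DMASETUP) names the tag
 * whose data phase may start, and a Set Device Bits FIS (SDBFIS) reports
 * completions through SActive.
 */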
2222 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2223 {
2224         struct nv_swncq_port_priv *pp = ap->private_data;
2225         struct ata_queued_cmd *qc;
2226         struct ata_eh_info *ehi = &ap->link.eh_info;
2227         u32 serror;
2228         u8 ata_stat;
2229         int rc = 0;
2230
2231         ata_stat = ap->ops->check_status(ap);
2232         nv_swncq_irq_clear(ap, fis);
2233         if (!fis)
2234                 return;
2235
2236         if (ap->pflags & ATA_PFLAG_FROZEN)
2237                 return;
2238
2239         if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2240                 nv_swncq_hotplug(ap, fis);
2241                 return;
2242         }
2243
2244         if (!pp->qc_active)
2245                 return;
2246
2247         if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
2248                 return;
2249         ap->ops->scr_write(ap, SCR_ERROR, serror);
2250
2251         if (ata_stat & ATA_ERR) {
2252                 ata_ehi_clear_desc(ehi);
2253                 ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2254                 ehi->err_mask |= AC_ERR_DEV;
2255                 ehi->serror |= serror;
2256                 ehi->action |= ATA_EH_SOFTRESET;
2257                 ata_port_freeze(ap);
2258                 return;
2259         }
2260
2261         if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2262                 /* If the IRQ signals a backout, the driver must
2263                  * reissue the command again some time later.
2264                  */
2265                 pp->ncq_flags |= ncq_saw_backout;
2266         }
2267
2268         if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2269                 pp->ncq_flags |= ncq_saw_sdb;
2270                 DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2271                         "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2272                         ap->print_id, pp->qc_active, pp->dhfis_bits,
2273                         pp->dmafis_bits, readl(pp->sactive_block));
2274                 rc = nv_swncq_sdbfis(ap);
2275                 if (rc < 0)
2276                         goto irq_error;
2277         }
2278
2279         if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2280                 /* The interrupt indicates the new command
2281                  * was transmitted correctly to the drive.
2282                  */
2283                 pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2284                 pp->ncq_flags |= ncq_saw_d2h;
2285                 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2286                         ata_ehi_push_desc(ehi, "illegal fis transaction");
2287                         ehi->err_mask |= AC_ERR_HSM;
2288                         ehi->action |= ATA_EH_HARDRESET;
2289                         goto irq_error;
2290                 }
2291
2292                 if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2293                     !(pp->ncq_flags & ncq_saw_dmas)) {
2294                         ata_stat = ap->ops->check_status(ap);
2295                         if (ata_stat & ATA_BUSY)
2296                                 goto irq_exit;
2297
2298                         if (pp->defer_queue.defer_bits) {
2299                                 DPRINTK("send next command\n");
2300                                 qc = nv_swncq_qc_from_dq(ap);
2301                                 nv_swncq_issue_atacmd(ap, qc);
2302                         }
2303                 }
2304         }
2305
2306         if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2307                 /* program the dma controller with appropriate PRD buffers
2308                  * and start the DMA transfer for the requested command.
2309                  */
2310                 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2311                 pp->ncq_flags |= ncq_saw_dmas;
2312                 rc = nv_swncq_dmafis(ap);
2313         }
2314
2315 irq_exit:
2316         return;
2317 irq_error:
2318         ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2319         ata_port_freeze(ap);
2320         return;
2321 }
2322
2323 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2324 {
2325         struct ata_host *host = dev_instance;
2326         unsigned int i;
2327         unsigned int handled = 0;
2328         unsigned long flags;
2329         u32 irq_stat;
2330
2331         spin_lock_irqsave(&host->lock, flags);
2332
2333         irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2334
2335         for (i = 0; i < host->n_ports; i++) {
2336                 struct ata_port *ap = host->ports[i];
2337
2338                 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2339                         if (ap->link.sactive) {
2340                                 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2341                                 handled = 1;
2342                         } else {
2343                                 if (irq_stat)   /* clear all but the hotplug bits */
2344                                         nv_swncq_irq_clear(ap, 0xfff0);
2345
2346                                 handled += nv_host_intr(ap, (u8)irq_stat);
2347                         }
2348                 }
2349                 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2350         }
2351
2352         spin_unlock_irqrestore(&host->lock, flags);
2353
2354         return IRQ_RETVAL(handled);
2355 }
2356
2357 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2358 {
2359         static int printed_version;
2360         const struct ata_port_info *ppi[] = { NULL, NULL };
2361         struct ata_host *host;
2362         struct nv_host_priv *hpriv;
2363         int rc;
2364         u32 bar;
2365         void __iomem *base;
2366         unsigned long type = ent->driver_data;
2367
2368         // Make sure this is a SATA controller by counting the number of bars
2369         // (NVIDIA SATA controllers will always have six bars).  Otherwise,
2370         // it's an IDE controller and we ignore it.
2371         for (bar = 0; bar < 6; bar++)
2372                 if (pci_resource_start(pdev, bar) == 0)
2373                         return -ENODEV;
2374
2375         if (!printed_version++)
2376                 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2377
2378         rc = pcim_enable_device(pdev);
2379         if (rc)
2380                 return rc;
2381
2382         /* determine type and allocate host */
2383         if (type == CK804 && adma_enabled) {
2384                 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2385                 type = ADMA;
2386         }
2387
2388         if (type == SWNCQ) {
2389                 if (swncq_enabled)
2390                         dev_printk(KERN_NOTICE, &pdev->dev,
2391                                    "Using SWNCQ mode\n");
2392                 else
2393                         type = GENERIC;
2394         }
2395
2396         ppi[0] = &nv_port_info[type];
2397         rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
2398         if (rc)
2399                 return rc;
2400
2401         hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2402         if (!hpriv)
2403                 return -ENOMEM;
2404         hpriv->type = type;
2405         host->private_data = hpriv;
2406
2407         /* set 64bit dma masks, may fail */
2408         if (type == ADMA) {
2409                 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
2410                         pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2411         }
2412
2413         /* request and iomap NV_MMIO_BAR */
2414         rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2415         if (rc)
2416                 return rc;
2417
2418         /* configure SCR access */
2419         base = host->iomap[NV_MMIO_BAR];
2420         host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2421         host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2422
2423         /* enable SATA space for CK804 */
2424         if (type >= CK804) {
2425                 u8 regval;
2426
2427                 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2428                 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2429                 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2430         }
2431
2432         /* init ADMA */
2433         if (type == ADMA) {
2434                 rc = nv_adma_host_init(host);
2435                 if (rc)
2436                         return rc;
2437         } else if (type == SWNCQ)
2438                 nv_swncq_host_init(host);
2439
2440         pci_set_master(pdev);
2441         return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
2442                                  IRQF_SHARED, ppi[0]->sht);
2443 }
2444
2445 #ifdef CONFIG_PM
2446 static int nv_pci_device_resume(struct pci_dev *pdev)
2447 {
2448         struct ata_host *host = dev_get_drvdata(&pdev->dev);
2449         struct nv_host_priv *hpriv = host->private_data;
2450         int rc;
2451
2452         rc = ata_pci_device_do_resume(pdev);
2453         if (rc)
2454                 return rc;
2455
2456         if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2457                 if (hpriv->type >= CK804) {
2458                         u8 regval;
2459
2460                         pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2461                         regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2462                         pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2463                 }
2464                 if (hpriv->type == ADMA) {
2465                         u32 tmp32;
2466                         struct nv_adma_port_priv *pp;
2467                         /* enable/disable ADMA on the ports appropriately */
2468                         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2469
2470                         pp = host->ports[0]->private_data;
2471                         if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2472                                 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2473                                            NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2474                         else
2475                                 tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2476                                            NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2477                         pp = host->ports[1]->private_data;
2478                         if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2479                                 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2480                                            NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2481                         else
2482                                 tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2483                                            NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2484
2485                         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2486                 }
2487         }
2488
2489         ata_host_resume(host);
2490
2491         return 0;
2492 }
2493 #endif
2494
2495 static void nv_ck804_host_stop(struct ata_host *host)
2496 {
2497         struct pci_dev *pdev = to_pci_dev(host->dev);
2498         u8 regval;
2499
2500         /* disable SATA space for CK804 */
2501         pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2502         regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2503         pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2504 }
2505
2506 static void nv_adma_host_stop(struct ata_host *host)
2507 {
2508         struct pci_dev *pdev = to_pci_dev(host->dev);
2509         u32 tmp32;
2510
2511         /* disable ADMA on the ports */
2512         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2513         tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2514                    NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2515                    NV_MCP_SATA_CFG_20_PORT1_EN |
2516                    NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2517
2518         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2519
2520         nv_ck804_host_stop(host);
2521 }
2522
2523 static int __init nv_init(void)
2524 {
2525         return pci_register_driver(&nv_pci_driver);
2526 }
2527
2528 static void __exit nv_exit(void)
2529 {
2530         pci_unregister_driver(&nv_pci_driver);
2531 }
2532
2533 module_init(nv_init);
2534 module_exit(nv_exit);
2535 module_param_named(adma, adma_enabled, bool, 0444);
2536 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
2537 module_param_named(swncq, swncq_enabled, bool, 0444);
2538 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)");
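/* Both parameters are read-only at runtime (mode 0444); to override the
 * defaults, set them at module load time, e.g.:
 *     modprobe sata_nv adma=0 swncq=1
 */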
2539