 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc. All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  1) Needs a full errata audit for all chipsets. I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  7) Test and verify 3.0 Gbps support

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  added latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards? If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported. I only have a 6042.
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.81"
	/* BARs are enumerated in pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE		= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
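
	/* Illustration only (derived from the sizes above, not from the
	 * hardware spec): the single per-port DMA chunk allocated in
	 * mv_port_start() is carved up in this order, so with 32-deep
	 * queues and 176 SG entries it comes to exactly 4KB:
	 *
	 *   0x000: CRQB ring,  32 slots * 32B = 1024B (1KB aligned)
	 *   0x400: CRPB ring,  32 slots *  8B =  256B (256B aligned)
	 *   0x500: ePRD table, 176 ents * 16B = 2816B (16B aligned)
	 */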
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */

	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),
	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */

	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
	/* SATAHC registers */

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	SATA_STATUS_OFS		= 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),
	MV_HP_GEN_II		= (1 << 7),
	MV_HP_GEN_IIE		= (1 << 8),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)

	MV_DMA_BOUNDARY		= 0xffffffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
/* Command ReQuest Block: 32B */

/* Command ResPonse Block: 8B */

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */

struct mv_port_priv {
	struct mv_crqb		*crqb;
	struct mv_crpb		*crpb;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;
	unsigned int		resp_idx;

struct mv_port_signal {

	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);

struct mv_host_priv {
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
static const struct ata_port_info mv_port_info[] = {
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,

		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,

		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,

		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,

		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,

		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,

		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */

static struct pci_driver mv_pci_driver = {
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,

static int msi;		/* Use PCI msi; either zero (off, default) or non-zero */
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
static inline void writelfl(unsigned long data, void __iomem *addr)
	(void) readl(addr);	/* flush to avoid PCI posted write */

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));

static inline unsigned int mv_hc_from_port(unsigned int port)
	return port >> MV_PORT_HC_SHIFT;

static inline unsigned int mv_hardport_from_port(unsigned int port)
	return port & MV_PORT_MASK;

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
	return mv_hc_base(base, mv_hc_from_port(port));

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);

static inline void __iomem *mv_ap_base(struct ata_port *ap)
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);

static inline int mv_get_hc_count(unsigned long port_flags)
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
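
/*
 * Worked example of the register address math above (illustrative only,
 * and assuming MV_PORT_MASK == 3 to match MV_PORT_HC_SHIFT == 2):
 *
 *   port 5 -> hc = 5 >> 2 = 1, hardport = 5 & 3 = 1
 *   hc base   = bar0 + MV_SATAHC0_REG_BASE + 1 * MV_SATAHC_REG_SZ
 *             = bar0 + 0x20000 + 0x10000 = bar0 + 0x30000
 *   port base = hc base + MV_SATAHC_ARBTR_REG_SZ + 1 * MV_PORT_REG_SZ
 *             = bar0 + 0x30000 + 0x2000 + 0x2000 = bar0 + 0x34000
 */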
static void mv_irq_clear(struct ata_port *ap)

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
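
/*
 * Note on the queue pointer registers programmed above: because the CRQB
 * ring is 1KB aligned and the CRPB ring is 256B aligned, the low base
 * address bits (EDMA_REQ_Q_BASE_LO_MASK / EDMA_RSP_Q_BASE_LO_MASK) and the
 * 5-bit ring index (shifted by EDMA_REQ_Q_PTR_SHIFT / EDMA_RSP_Q_PTR_SHIFT)
 * can share a single 32-bit register, which is why the IN/OUT pointer
 * registers "also contain BASE_LO".
 */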
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *
 *      Inherited from caller.
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));

 * mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *
 *      Inherited from caller.
static int mv_stop_dma(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active. The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))

		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
static void mv_dump_mem(void __iomem *start, unsigned bytes)
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		start_hc = port >> MV_PORT_HC_SHIFT;
		num_ports = num_hcs = 1;
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	DPRINTK("PCI config space regs:\n");
	mv_dump_pci_cfg(pdev, 0x68);

	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		return readl(mv_ap_base(ap) + ofs);

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		writelfl(val, mv_ap_base(ap) + ofs);

static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *
 *      Inherited from caller.
static int mv_port_start(struct ata_port *ap)
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/*
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/*
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 */
	ap->private_data = pp;

 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      This routine uses the host lock to protect the DMA stop.
static void mv_port_stop(struct ata_port *ap)
	unsigned long flags;

	spin_lock_irqsave(&ap->host->lock, flags);
	spin_unlock_irqrestore(&ap->host->lock, flags);
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      Inherited from caller.
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);

		if (ata_sg_is_last(sg, qc))
			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
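
/*
 * Illustrative reading of the packing above: each 16-bit CRQB ATA-command
 * word carries the register value in its low byte, the register address
 * shifted to CRQB_CMD_ADDR_SHIFT, the CRQB_CMD_CS select bits (0x2 << 11),
 * and CRQB_CMD_LAST (bit 15) set only on the final word of the command.
 */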
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA. Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      Inherited from caller.
static void mv_qc_prep(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_taskfile *tf;

	if (qc->tf.protocol != ATA_PROT_DMA)

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command. So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ. NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
#endif				/* FIXME: remove this line when NCQ added */
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux. If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
		mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
		mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
		mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
		mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
		mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
		mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
		mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
		mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA. Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      Inherited from caller.
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;

	if (qc->tf.protocol != ATA_PROT_DMA)

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
	crqb->ata_cmd[1] = cpu_to_le32(
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->hob_nsect << 8)

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA. Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      Inherited from caller.
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port. Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		return ata_qc_issue_prot(qc);

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
			      >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
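
/*
 * Note (paraphrasing the flow above, not new behaviour): pp->req_idx is the
 * software producer index for the CRQB ring.  Writing the bumped index into
 * EDMA_REQ_Q_IN_PTR_OFS is what hands the new CRQB to the EDMA engine;
 * completions later show up on the CRPB ring and are reaped against the
 * hardware IN pointer in mv_intr_edma().
 */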
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected command, if any (may be NULL)
 *
 *      In most cases, just clear the interrupt and move on. However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset(). The SERR case requires a
 *      clear of pending errors in the SATA SERROR register. Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      Inherited from caller.
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRBQ_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, ", parity error");
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			", dev disconnect" : ", dev connect");

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, ", EDMA self-disable");
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, ", EDMA self-disable");

	if (edma_err_cause & EDMA_ERR_SERR) {
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
		err_mask = AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;

	ehi->serror |= serr;
	ehi->action |= action;

		qc->err_mask |= err_mask;
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
static void mv_intr_pio(struct ata_port *ap)
	struct ata_queued_cmd *qc;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (unlikely(!qc))			/* no active tag */
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);

static void mv_intr_edma(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
		    >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			qc = ata_qc_from_tag(ap, ap->active_tag);

		/* 60xx: get active ATA command via tag, to enable support
		 * for queueing.  this works transparently for queued and
			if (IS_GEN_II(hpriv))
				tag = (le16_to_cpu(pp->crpb[out_index].id)
					>> CRPB_IOID_SHIFT_6) & 0x3f;
				tag = (le16_to_cpu(pp->crpb[out_index].id)
					>> CRPB_IOID_SHIFT_7) & 0x3f;
			qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);

		/* and finally, complete the ATA command */
			ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
		ata_qc_complete(qc);

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
		 (out_index << EDMA_RSP_Q_PTR_SHIFT),
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
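
/*
 * For reference when reading the interrupt handlers below (derived from the
 * HC_MAIN_* and HC_* enum values above): in the main IRQ cause register,
 * bits 0-8 belong to HC0 (HC0_IRQ_PEND) and bits 9-17 to HC1 (HC_SHIFT).
 * Each port owns an error/done bit pair (PORT0_ERR / PORT0_DONE, moved up
 * two bits per port), with bit 8 reserved for HC0's coalescing-done event,
 * which is why mv_host_intr() skips a bit for ports on the second HC.
 */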
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing. Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      Inherited from caller.
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))

			mv_err_intr(ap, qc);

		hard_port = mv_hardport_from_port(port);	/* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;

	err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_port_offline(ap)) {
			ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->active_tag);
				qc->err_mask |= err_mask;
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
 * mv_interrupt - Main interrupt event handler
 * @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts. If so, call lower level
 *      routine to handle. Also check for PCI errors which are only
 *
 *      This routine holds the host lock while processing pending
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	if (!irq_stat || (0xffffffffU == irq_stat))

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		goto out_unlock;	/* skip all other HC irq handling */

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
			mv_host_intr(host, relevant, hc);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
	switch (sc_reg_in) {
		ofs = sc_reg_in * sizeof(u32);

static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(addr + ofs);

static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, addr + ofs);

static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);

	mv_reset_pci_bus(pdev, mmio);

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

		tmp = readl(phy_mmio + MV5_LT_MODE);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		writel(tmp, phy_mmio + MV5_PHY_CTL);

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);

	tmp = readl(hc_mmio + 0x20);
	writel(tmp, hc_mmio + 0x20);

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);

#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
	tmp = readl(mmio + MV_PCI_MODE);
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      Inherited from caller.
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;

	/* Following procedure defined in PCI "main command and status
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		if (PCI_MASTER_EMPTY & t) {
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");

	writel(t | GLOB_SFT_RST, reg);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
	void __iomem *port_mmio;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
	void __iomem *port_mmio = mv_port_base(mmio, port);
	u32 hp_flags = hpriv->hp_flags;
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		writel(m2, port_mmio + PHY_MODE2);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {

	writel(m2, port_mmio + PHY_MODE2);

static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000;	/* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit. Marvell's driver does
	 * clear the bit, however.
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      Inherited from caller. This is coded to be safe to call at
 *      interrupt level, i.e. it does not sleep.
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))

		if (time_after(jiffies, deadline))

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap, 0, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
static int mv_prereset(struct ata_port *ap, unsigned long deadline)
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_context *ehc = &ap->eh_context;

	rc = mv_stop_dma(ap);
		ehc->i.action |= ATA_EH_HARDRESET;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)

	if (ata_port_online(ap))
		rc = ata_wait_ready(ap, deadline);

static int mv_hardreset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

static void mv_postreset(struct ata_port *ap, unsigned int *classes)
	/* print link status */
	sata_print_link_status(ap);

	sata_scr_read(ap, SCR_ERROR, &serr);
	sata_scr_write_flush(ap, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

static void mv_error_handler(struct ata_port *ap)
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);

static void mv_post_int_cmd(struct ata_queued_cmd *qc)
	mv_stop_dma(qc->ap);
static void mv_eh_freeze(struct ata_port *ap)
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);

static void mv_eh_thaw(struct ata_port *ap)
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);		/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8));	/* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
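
/*
 * Worked example of the freeze/thaw mask above: port 2 gives shift = 4 and
 * mask = 0x3 << 4, i.e. that port's error/done bit pair in
 * HC_MAIN_IRQ_MASK_OFS.  For ports on the second HC the extra "skip bit 8"
 * adjustment presumably applies as well, mirroring the shift math shown in
 * mv_host_intr() (that adjustment is not visible in the excerpt above).
 */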
2335 * mv_port_init - Perform some early initialization on a single port.
2336 * @port: libata data structure storing shadow register addresses
2337 * @port_mmio: base address of the port
2339 * Initialize shadow register mmio addresses, clear outstanding
2340 * interrupts on the port, and unmask interrupts for the future
2341 * start of the port.
2344 * Inherited from caller.
2346 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2348 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2351 /* PIO related setup
2353 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2355 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2356 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2357 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2358 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2359 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2360 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2362 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2363 /* special case: control/altstatus doesn't have ATA_REG_ address */
2364 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2367 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2369 /* Clear any currently outstanding port interrupt conditions */
2370 serr_ofs = mv_scr_offset(SCR_ERROR);
2371 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2372 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2374 /* unmask all EDMA error interrupts */
2375 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2377 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2378 readl(port_mmio + EDMA_CFG_OFS),
2379 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2380 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
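/* Layout example for the shadow register block configured above: each
 * taskfile register occupies a 32-bit slot, so with the usual libata
 * register indices (ATA_REG_DATA == 0 ... ATA_REG_STATUS == 7) the addresses
 * work out to data at shd_base + 0x00, error/feature at +0x04, and
 * status/command at +0x1c, with control/altstatus living at the separate
 * SHD_CTL_AST_OFS offset.
 */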
2383 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2385 struct pci_dev *pdev = to_pci_dev(host->dev);
2386 struct mv_host_priv *hpriv = host->private_data;
2387 u32 hp_flags = hpriv->hp_flags;
2391 hpriv->ops = &mv5xxx_ops;
2392 hp_flags |= MV_HP_GEN_I;
2394 switch (pdev->revision) {
2396 hp_flags |= MV_HP_ERRATA_50XXB0;
2399 hp_flags |= MV_HP_ERRATA_50XXB2;
2402 dev_printk(KERN_WARNING, &pdev->dev,
2403 "Applying 50XXB2 workarounds to unknown rev\n");
2404 hp_flags |= MV_HP_ERRATA_50XXB2;
2411 hpriv->ops = &mv5xxx_ops;
2412 hp_flags |= MV_HP_GEN_I;
2414 switch (pdev->revision) {
2416 hp_flags |= MV_HP_ERRATA_50XXB0;
2419 hp_flags |= MV_HP_ERRATA_50XXB2;
2422 dev_printk(KERN_WARNING, &pdev->dev,
2423 "Applying B2 workarounds to unknown rev\n");
2424 hp_flags |= MV_HP_ERRATA_50XXB2;
2431 hpriv->ops = &mv6xxx_ops;
2432 hp_flags |= MV_HP_GEN_II;
2434 switch (pdev->revision) {
2436 hp_flags |= MV_HP_ERRATA_60X1B2;
2439 hp_flags |= MV_HP_ERRATA_60X1C0;
2442 dev_printk(KERN_WARNING, &pdev->dev,
2443 "Applying B2 workarounds to unknown rev\n");
2444 hp_flags |= MV_HP_ERRATA_60X1B2;
2451 hpriv->ops = &mv6xxx_ops;
2452 hp_flags |= MV_HP_GEN_IIE;
2454 switch (pdev->revision) {
2456 hp_flags |= MV_HP_ERRATA_XX42A0;
2459 hp_flags |= MV_HP_ERRATA_60X1C0;
2462 dev_printk(KERN_WARNING, &pdev->dev,
2463 "Applying 60X1C0 workarounds to unknown rev\n");
2464 hp_flags |= MV_HP_ERRATA_60X1C0;
2470 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2474 hpriv->hp_flags = hp_flags;
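/* The hp_flags value stored above drives the IS_GEN_I/IS_GEN_II/IS_GEN_IIE
 * tests used throughout the rest of the driver.  Those macros are defined
 * earlier in the file (outside this excerpt); a sketch of the intended shape:
 */
#if 0
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
#endif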
2480 * mv_init_host - Perform some early initialization of the host.
2481 * @host: ATA host to initialize
2482 * @board_idx: controller index
2484 * If possible, do an early global reset of the host. Then do
2485 * our port init and clear/unmask all/relevant host interrupts.
2488 * Inherited from caller.
2490 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2492 int rc = 0, n_hc, port, hc;
2493 struct pci_dev *pdev = to_pci_dev(host->dev);
2494 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2495 struct mv_host_priv *hpriv = host->private_data;
2497 /* global interrupt mask */
2498 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2500 rc = mv_chip_id(host, board_idx);
2504 n_hc = mv_get_hc_count(host->ports[0]->flags);
2506 for (port = 0; port < host->n_ports; port++)
2507 hpriv->ops->read_preamp(hpriv, port, mmio);
2509 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2513 hpriv->ops->reset_flash(hpriv, mmio);
2514 hpriv->ops->reset_bus(pdev, mmio);
2515 hpriv->ops->enable_leds(hpriv, mmio);
2517 for (port = 0; port < host->n_ports; port++) {
2518 if (IS_GEN_II(hpriv)) {
2519 void __iomem *port_mmio = mv_port_base(mmio, port);
2521 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2522 ifctl |= (1 << 7); /* enable gen2i speed */
2523 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2524 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2527 hpriv->ops->phy_errata(hpriv, mmio, port);
2530 for (port = 0; port < host->n_ports; port++) {
2531 void __iomem *port_mmio = mv_port_base(mmio, port);
2532 mv_port_init(&host->ports[port]->ioaddr, port_mmio);
2535 for (hc = 0; hc < n_hc; hc++) {
2536 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2538 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2539 "(before clear)=0x%08x\n", hc,
2540 readl(hc_mmio + HC_CFG_OFS),
2541 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2543 /* Clear any currently outstanding hc interrupt conditions */
2544 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2547 /* Clear any currently outstanding host interrupt conditions */
2548 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2550 /* and unmask interrupt generation for host regs */
2551 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2553 if (IS_GEN_I(hpriv))
2554 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2556 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2558 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2559 "PCI int cause/mask=0x%08x/0x%08x\n",
2560 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2561 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2562 readl(mmio + PCI_IRQ_CAUSE_OFS),
2563 readl(mmio + PCI_IRQ_MASK_OFS));
2570 * mv_print_info - Dump key info to kernel log for perusal.
2571 * @host: ATA host to print info about
2573 * FIXME: complete this.
2576 * Inherited from caller.
2578 static void mv_print_info(struct ata_host *host)
2580 struct pci_dev *pdev = to_pci_dev(host->dev);
2581 struct mv_host_priv *hpriv = host->private_data;
2583 const char *scc_s, *gen;
2585 /* Read the PCI class code so we can report below whether the
2586 * controller presents itself in SCSI or RAID mode
2588 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2591 else if (scc == 0x01)
2596 if (IS_GEN_I(hpriv))
2598 else if (IS_GEN_II(hpriv))
2600 else if (IS_GEN_IIE(hpriv))
2605 dev_printk(KERN_INFO, &pdev->dev,
2606 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2607 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2608 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
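/* Example of the resulting log line, with illustrative bus/port values and
 * assuming the MV_MAX_Q_DEPTH of 32 defined earlier in this file:
 *
 *   sata_mv 0000:02:00.0: Gen-II 32 slots 4 ports SCSI mode IRQ via INTx
 */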
2612 * mv_init_one - handle a positive probe of a Marvell host
2613 * @pdev: PCI device found
2614 * @ent: PCI device ID entry for the matched host
2617 * Inherited from caller.
2619 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2621 static int printed_version = 0;
2622 unsigned int board_idx = (unsigned int)ent->driver_data;
2623 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2624 struct ata_host *host;
2625 struct mv_host_priv *hpriv;
2628 if (!printed_version++)
2629 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2632 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2634 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2635 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2636 if (!host || !hpriv)
2638 host->private_data = hpriv;
2640 /* acquire resources */
2641 rc = pcim_enable_device(pdev);
2645 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2647 pcim_pin_device(pdev);
2650 host->iomap = pcim_iomap_table(pdev);
2652 rc = pci_go_64(pdev);
2656 /* initialize adapter */
2657 rc = mv_init_host(host, board_idx);
2661 /* Enable interrupts */
2662 if (msi && pci_enable_msi(pdev))
2665 mv_dump_pci_cfg(pdev, 0x68);
2666 mv_print_info(host);
2668 pci_set_master(pdev);
2670 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2671 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2674 static int __init mv_init(void)
2676 return pci_register_driver(&mv_pci_driver);
2679 static void __exit mv_exit(void)
2681 pci_unregister_driver(&mv_pci_driver);
2684 MODULE_AUTHOR("Brett Russ");
2685 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2686 MODULE_LICENSE("GPL");
2687 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2688 MODULE_VERSION(DRV_VERSION);
2690 module_param(msi, int, 0444);
2691 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
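/* Example of setting the msi parameter at load time (assuming the driver is
 * built as a module named sata_mv):
 *
 *   modprobe sata_mv msi=1
 *
 * The 0444 permission above also exposes the current value read-only at
 * /sys/module/sata_mv/parameters/msi.
 */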
2693 module_init(mv_init);
2694 module_exit(mv_exit);