 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.0"
78 /* BAR's are enumerated in terms of pci_resource_start() terms */
79 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
80 MV_IO_BAR = 2, /* offset 0x18: IO space */
81 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
83 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
84 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
87 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
88 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
89 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
90 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
91 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
92 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
94 MV_SATAHC0_REG_BASE = 0x20000,
95 MV_FLASH_CTL = 0x1046c,
96 MV_GPIO_PORT_CTL = 0x104f0,
97 MV_RESET_CFG = 0x180d8,
99 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
100 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
102 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
105 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
107 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
108 * CRPB needs alignment on a 256B boundary. Size == 256B
109 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
110 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
112 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
113 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
115 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
116 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
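	/* Worked example of the sizing above (assuming MV_MAX_Q_DEPTH == 32 and
	 * MV_MAX_SG_CT == 176, as the 1KB and 4KB figures in the comment imply):
	 *   MV_CRQB_Q_SZ        = 32 * 32  = 1024 bytes
	 *   MV_CRPB_Q_SZ        =  8 * 32  =  256 bytes
	 *   MV_SG_TBL_SZ        = 16 * 176 = 2816 bytes
	 *   MV_PORT_PRIV_DMA_SZ = 1024 + 256 + 2816 = 4096 bytes (4KB)
	 */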
119 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
120 MV_PORT_HC_SHIFT = 2,
121 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
125 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
126 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
127 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
128 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
129 ATA_FLAG_PIO_POLLING,
130 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
132 CRQB_FLAG_READ = (1 << 0),
134 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
135 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
136 CRQB_CMD_ADDR_SHIFT = 8,
137 CRQB_CMD_CS = (0x2 << 11),
138 CRQB_CMD_LAST = (1 << 15),
140 CRPB_FLAG_STATUS_SHIFT = 8,
141 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
142 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
144 EPRD_FLAG_END_OF_TBL = (1 << 31),
146 /* PCI interface registers */
148 PCI_COMMAND_OFS = 0xc00,
150 PCI_MAIN_CMD_STS_OFS = 0xd30,
151 STOP_PCI_MASTER = (1 << 2),
152 PCI_MASTER_EMPTY = (1 << 3),
153 GLOB_SFT_RST = (1 << 4),
156 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
157 MV_PCI_DISC_TIMER = 0xd04,
158 MV_PCI_MSI_TRIGGER = 0xc38,
159 MV_PCI_SERR_MASK = 0xc28,
160 MV_PCI_XBAR_TMOUT = 0x1d04,
161 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
162 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
163 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
164 MV_PCI_ERR_COMMAND = 0x1d50,
166 PCI_IRQ_CAUSE_OFS = 0x1d58,
167 PCI_IRQ_MASK_OFS = 0x1d5c,
168 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
170 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
171 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
172 PORT0_ERR = (1 << 0), /* shift by port # */
173 PORT0_DONE = (1 << 1), /* shift by port # */
174 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
175 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
177 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
178 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
179 PORTS_0_3_COAL_DONE = (1 << 8),
180 PORTS_4_7_COAL_DONE = (1 << 17),
181 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
182 GPIO_INT = (1 << 22),
183 SELF_INT = (1 << 23),
184 TWSI_INT = (1 << 24),
185 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
186 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
187 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
188 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
190 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
193 /* SATAHC registers */
196 HC_IRQ_CAUSE_OFS = 0x14,
197 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
198 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
199 DEV_IRQ = (1 << 8), /* shift by port # */
201 /* Shadow block registers */
203 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
206 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
207 SATA_ACTIVE_OFS = 0x350,
214 SATA_INTERFACE_CTL = 0x050,
216 MV_M2_PREAMP_MASK = 0x7e0,
220 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
221 EDMA_CFG_NCQ = (1 << 5),
222 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
223 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
224 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
226 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
227 EDMA_ERR_IRQ_MASK_OFS = 0xc,
228 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
229 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
230 EDMA_ERR_DEV = (1 << 2), /* device error */
231 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
232 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
233 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
234 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
235 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
236 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
238 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
239 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
240 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
241 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
242 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
243 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
244 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
245 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
246 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
247 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
248 EDMA_ERR_OVERRUN_5 = (1 << 5),
249 EDMA_ERR_UNDERRUN_5 = (1 << 6),
250 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
260 EDMA_ERR_LNK_CTRL_RX_2 |
261 EDMA_ERR_LNK_DATA_RX |
262 EDMA_ERR_LNK_DATA_TX |
263 EDMA_ERR_TRANS_PROTO,
264 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
269 EDMA_ERR_UNDERRUN_5 |
270 EDMA_ERR_SELF_DIS_5 |
276 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
277 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
279 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
280 EDMA_REQ_Q_PTR_SHIFT = 5,
282 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
283 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
284 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
285 EDMA_RSP_Q_PTR_SHIFT = 3,
287 EDMA_CMD_OFS = 0x28, /* EDMA command register */
288 EDMA_EN = (1 << 0), /* enable EDMA */
289 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
290 ATA_RST = (1 << 2), /* reset trans/link/phy */
292 EDMA_IORDY_TMOUT = 0x34,
295 /* Host private flags (hp_flags) */
296 MV_HP_FLAG_MSI = (1 << 0),
297 MV_HP_ERRATA_50XXB0 = (1 << 1),
298 MV_HP_ERRATA_50XXB2 = (1 << 2),
299 MV_HP_ERRATA_60X1B2 = (1 << 3),
300 MV_HP_ERRATA_60X1C0 = (1 << 4),
301 MV_HP_ERRATA_XX42A0 = (1 << 5),
302 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
303 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
304 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
306 /* Port private flags (pp_flags) */
307 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
308 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
311 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
312 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
313 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
316 MV_DMA_BOUNDARY = 0xffffffffU,
318 /* mask of register bits containing lower 32 bits
319 * of EDMA request queue DMA address
321 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
323 /* ditto, for response queue */
324 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
337 /* Command ReQuest Block: 32B */
353 /* Command ResPonse Block: 8B */
360 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
368 struct mv_port_priv {
369 struct mv_crqb *crqb;
371 struct mv_crpb *crpb;
373 struct mv_sg *sg_tbl;
374 dma_addr_t sg_tbl_dma;
376 unsigned int req_idx;
377 unsigned int resp_idx;
382 struct mv_port_signal {
389 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
391 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
392 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
394 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
396 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
397 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
400 struct mv_host_priv {
402 struct mv_port_signal signal[8];
403 const struct mv_hw_ops *ops;
406 static void mv_irq_clear(struct ata_port *ap);
407 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
408 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
409 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
410 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
411 static int mv_port_start(struct ata_port *ap);
412 static void mv_port_stop(struct ata_port *ap);
413 static void mv_qc_prep(struct ata_queued_cmd *qc);
414 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
415 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
416 static void mv_error_handler(struct ata_port *ap);
417 static void mv_post_int_cmd(struct ata_queued_cmd *qc);
418 static void mv_eh_freeze(struct ata_port *ap);
419 static void mv_eh_thaw(struct ata_port *ap);
420 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
422 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
424 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
425 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
427 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
429 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
430 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
432 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
434 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
435 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
437 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
439 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
440 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
441 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
442 unsigned int port_no);
444 static struct scsi_host_template mv5_sht = {
445 .module = THIS_MODULE,
447 .ioctl = ata_scsi_ioctl,
448 .queuecommand = ata_scsi_queuecmd,
449 .can_queue = ATA_DEF_QUEUE,
450 .this_id = ATA_SHT_THIS_ID,
451 .sg_tablesize = MV_MAX_SG_CT,
452 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
453 .emulated = ATA_SHT_EMULATED,
455 .proc_name = DRV_NAME,
456 .dma_boundary = MV_DMA_BOUNDARY,
457 .slave_configure = ata_scsi_slave_config,
458 .slave_destroy = ata_scsi_slave_destroy,
459 .bios_param = ata_std_bios_param,
462 static struct scsi_host_template mv6_sht = {
463 .module = THIS_MODULE,
465 .ioctl = ata_scsi_ioctl,
466 .queuecommand = ata_scsi_queuecmd,
467 .can_queue = ATA_DEF_QUEUE,
468 .this_id = ATA_SHT_THIS_ID,
469 .sg_tablesize = MV_MAX_SG_CT,
470 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
471 .emulated = ATA_SHT_EMULATED,
473 .proc_name = DRV_NAME,
474 .dma_boundary = MV_DMA_BOUNDARY,
475 .slave_configure = ata_scsi_slave_config,
476 .slave_destroy = ata_scsi_slave_destroy,
477 .bios_param = ata_std_bios_param,
480 static const struct ata_port_operations mv5_ops = {
481 .port_disable = ata_port_disable,
483 .tf_load = ata_tf_load,
484 .tf_read = ata_tf_read,
485 .check_status = ata_check_status,
486 .exec_command = ata_exec_command,
487 .dev_select = ata_std_dev_select,
489 .cable_detect = ata_cable_sata,
491 .qc_prep = mv_qc_prep,
492 .qc_issue = mv_qc_issue,
493 .data_xfer = ata_data_xfer,
495 .irq_clear = mv_irq_clear,
496 .irq_on = ata_irq_on,
497 .irq_ack = ata_irq_ack,
499 .error_handler = mv_error_handler,
500 .post_internal_cmd = mv_post_int_cmd,
501 .freeze = mv_eh_freeze,
504 .scr_read = mv5_scr_read,
505 .scr_write = mv5_scr_write,
507 .port_start = mv_port_start,
508 .port_stop = mv_port_stop,
511 static const struct ata_port_operations mv6_ops = {
512 .port_disable = ata_port_disable,
514 .tf_load = ata_tf_load,
515 .tf_read = ata_tf_read,
516 .check_status = ata_check_status,
517 .exec_command = ata_exec_command,
518 .dev_select = ata_std_dev_select,
520 .cable_detect = ata_cable_sata,
522 .qc_prep = mv_qc_prep,
523 .qc_issue = mv_qc_issue,
524 .data_xfer = ata_data_xfer,
526 .irq_clear = mv_irq_clear,
527 .irq_on = ata_irq_on,
528 .irq_ack = ata_irq_ack,
530 .error_handler = mv_error_handler,
531 .post_internal_cmd = mv_post_int_cmd,
532 .freeze = mv_eh_freeze,
535 .scr_read = mv_scr_read,
536 .scr_write = mv_scr_write,
538 .port_start = mv_port_start,
539 .port_stop = mv_port_stop,
542 static const struct ata_port_operations mv_iie_ops = {
543 .port_disable = ata_port_disable,
545 .tf_load = ata_tf_load,
546 .tf_read = ata_tf_read,
547 .check_status = ata_check_status,
548 .exec_command = ata_exec_command,
549 .dev_select = ata_std_dev_select,
551 .cable_detect = ata_cable_sata,
553 .qc_prep = mv_qc_prep_iie,
554 .qc_issue = mv_qc_issue,
555 .data_xfer = ata_data_xfer,
557 .irq_clear = mv_irq_clear,
558 .irq_on = ata_irq_on,
559 .irq_ack = ata_irq_ack,
561 .error_handler = mv_error_handler,
562 .post_internal_cmd = mv_post_int_cmd,
563 .freeze = mv_eh_freeze,
566 .scr_read = mv_scr_read,
567 .scr_write = mv_scr_write,
569 .port_start = mv_port_start,
570 .port_stop = mv_port_stop,
573 static const struct ata_port_info mv_port_info[] = {
575 .flags = MV_COMMON_FLAGS,
576 .pio_mask = 0x1f, /* pio0-4 */
577 .udma_mask = ATA_UDMA6,
578 .port_ops = &mv5_ops,
581 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
582 .pio_mask = 0x1f, /* pio0-4 */
583 .udma_mask = ATA_UDMA6,
584 .port_ops = &mv5_ops,
587 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
588 .pio_mask = 0x1f, /* pio0-4 */
589 .udma_mask = ATA_UDMA6,
590 .port_ops = &mv5_ops,
593 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
594 .pio_mask = 0x1f, /* pio0-4 */
595 .udma_mask = ATA_UDMA6,
596 .port_ops = &mv6_ops,
599 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
601 .pio_mask = 0x1f, /* pio0-4 */
602 .udma_mask = ATA_UDMA6,
603 .port_ops = &mv6_ops,
606 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
607 .pio_mask = 0x1f, /* pio0-4 */
608 .udma_mask = ATA_UDMA6,
609 .port_ops = &mv_iie_ops,
612 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
613 .pio_mask = 0x1f, /* pio0-4 */
614 .udma_mask = ATA_UDMA6,
615 .port_ops = &mv_iie_ops,
619 static const struct pci_device_id mv_pci_tbl[] = {
620 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
621 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
622 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
623 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
624 /* RocketRAID 1740/174x have different identifiers */
625 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
626 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
628 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
629 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
630 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
631 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
632 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
634 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
637 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
639 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
641 /* add Marvell 7042 support */
642 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
644 { } /* terminate list */
647 static struct pci_driver mv_pci_driver = {
649 .id_table = mv_pci_tbl,
650 .probe = mv_init_one,
651 .remove = ata_pci_remove_one,
654 static const struct mv_hw_ops mv5xxx_ops = {
655 .phy_errata = mv5_phy_errata,
656 .enable_leds = mv5_enable_leds,
657 .read_preamp = mv5_read_preamp,
658 .reset_hc = mv5_reset_hc,
659 .reset_flash = mv5_reset_flash,
660 .reset_bus = mv5_reset_bus,
663 static const struct mv_hw_ops mv6xxx_ops = {
664 .phy_errata = mv6_phy_errata,
665 .enable_leds = mv6_enable_leds,
666 .read_preamp = mv6_read_preamp,
667 .reset_hc = mv6_reset_hc,
668 .reset_flash = mv6_reset_flash,
669 .reset_bus = mv_reset_pci_bus,
675 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
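/* A minimal sketch (an assumption, not necessarily how the original driver
 * exposes it): the flag above could be made tunable at module load time via
 * the standard module parameter interface.
 */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");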
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
715 static inline void writelfl(unsigned long data, void __iomem *addr)
718 (void) readl(addr); /* flush to avoid PCI posted write */
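/* PCI/MMIO writes may be posted (buffered) by intervening bridges; the
 * read-back in writelfl() forces the preceding write to reach the chip
 * before the caller continues.  It is used below wherever ordering matters,
 * e.g. clearing EDMA error causes or updating the EDMA queue pointers.
 */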
721 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
723 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
726 static inline unsigned int mv_hc_from_port(unsigned int port)
728 return port >> MV_PORT_HC_SHIFT;
731 static inline unsigned int mv_hardport_from_port(unsigned int port)
733 return port & MV_PORT_MASK;
736 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
739 return mv_hc_base(base, mv_hc_from_port(port));
742 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
744 return mv_hc_base_from_port(base, port) +
745 MV_SATAHC_ARBTR_REG_SZ +
746 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
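/* Worked example of the address map implied by the helpers above, assuming
 * MV_PORTS_PER_HC == 4 (so MV_PORT_MASK == 3, matching MV_PORT_HC_SHIFT of 2):
 * for port 5, hc = 5 >> 2 = 1 and hardport = 5 & 3 = 1, giving a port base of
 * MV_SATAHC0_REG_BASE + 1 * MV_SATAHC_REG_SZ + MV_SATAHC_ARBTR_REG_SZ
 * + 1 * MV_PORT_REG_SZ = 0x20000 + 0x10000 + 0x2000 + 0x2000 = 0x34000,
 * relative to the BAR mapped at MV_PRIMARY_BAR.
 */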
749 static inline void __iomem *mv_ap_base(struct ata_port *ap)
751 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
754 static inline int mv_get_hc_count(unsigned long port_flags)
756 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
759 static void mv_irq_clear(struct ata_port *ap)
763 static void mv_set_edma_ptrs(void __iomem *port_mmio,
764 struct mv_host_priv *hpriv,
765 struct mv_port_priv *pp)
770 * initialize request queue
772 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
774 WARN_ON(pp->crqb_dma & 0x3ff);
775 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
776 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
777 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
779 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
780 writelfl((pp->crqb_dma & 0xffffffff) | index,
781 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
783 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
786 * initialize response queue
788 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
790 WARN_ON(pp->crpb_dma & 0xff);
791 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
793 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
794 writelfl((pp->crpb_dma & 0xffffffff) | index,
795 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
797 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
799 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
800 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
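	/* Example of the packing above (derived from the shift/mask constants):
	 * the in/out pointer registers also carry BASE_LO in their upper bits,
	 * so for a 1KB-aligned CRQB ring at DMA address 0x1ffc00 with software
	 * index 5, the value written is
	 *   (0x1ffc00 & EDMA_REQ_Q_BASE_LO_MASK) | (5 << EDMA_REQ_Q_PTR_SHIFT)
	 *   = 0x1ffc00 | 0xa0,
	 * i.e. bits 31:10 hold the ring base and bits 9:5 hold the index.
	 */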
804 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @hpriv: host private data
 * @pp: port private data
808 * Verify the local cache of the eDMA state is accurate with a
812 * Inherited from caller.
814 static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
815 struct mv_port_priv *pp)
817 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
818 /* clear EDMA event indicators, if any */
819 writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);
821 mv_set_edma_ptrs(base, hpriv, pp);
823 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
824 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
826 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
830 * __mv_stop_dma - Disable eDMA engine
831 * @ap: ATA channel to manipulate
833 * Verify the local cache of the eDMA state is accurate with a
837 * Inherited from caller.
839 static int __mv_stop_dma(struct ata_port *ap)
841 void __iomem *port_mmio = mv_ap_base(ap);
842 struct mv_port_priv *pp = ap->private_data;
846 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears. */
849 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
850 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
852 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
855 /* now properly wait for the eDMA to stop */
856 for (i = 1000; i > 0; i--) {
857 reg = readl(port_mmio + EDMA_CMD_OFS);
858 if (!(reg & EDMA_EN))
865 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
872 static int mv_stop_dma(struct ata_port *ap)
877 spin_lock_irqsave(&ap->host->lock, flags);
878 rc = __mv_stop_dma(ap);
879 spin_unlock_irqrestore(&ap->host->lock, flags);
885 static void mv_dump_mem(void __iomem *start, unsigned bytes)
888 for (b = 0; b < bytes; ) {
889 DPRINTK("%p: ", start + b);
890 for (w = 0; b < bytes && w < 4; w++) {
891 printk("%08x ",readl(start + b));
899 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
904 for (b = 0; b < bytes; ) {
905 DPRINTK("%02x: ", b);
906 for (w = 0; b < bytes && w < 4; w++) {
907 (void) pci_read_config_dword(pdev,b,&dw);
915 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
916 struct pci_dev *pdev)
919 void __iomem *hc_base = mv_hc_base(mmio_base,
920 port >> MV_PORT_HC_SHIFT);
921 void __iomem *port_base;
922 int start_port, num_ports, p, start_hc, num_hcs, hc;
925 start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4-port devs */
929 start_hc = port >> MV_PORT_HC_SHIFT;
931 num_ports = num_hcs = 1;
933 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
934 num_ports > 1 ? num_ports - 1 : start_port);
937 DPRINTK("PCI config space regs:\n");
938 mv_dump_pci_cfg(pdev, 0x68);
940 DPRINTK("PCI regs:\n");
941 mv_dump_mem(mmio_base+0xc00, 0x3c);
942 mv_dump_mem(mmio_base+0xd00, 0x34);
943 mv_dump_mem(mmio_base+0xf00, 0x4);
944 mv_dump_mem(mmio_base+0x1d00, 0x6c);
945 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
946 hc_base = mv_hc_base(mmio_base, hc);
947 DPRINTK("HC regs (HC %i):\n", hc);
948 mv_dump_mem(hc_base, 0x1c);
950 for (p = start_port; p < start_port + num_ports; p++) {
951 port_base = mv_port_base(mmio_base, p);
952 DPRINTK("EDMA regs (port %i):\n",p);
953 mv_dump_mem(port_base, 0x54);
954 DPRINTK("SATA regs (port %i):\n",p);
955 mv_dump_mem(port_base+0x300, 0x60);
960 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
968 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
971 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
980 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
982 unsigned int ofs = mv_scr_offset(sc_reg_in);
984 if (ofs != 0xffffffffU) {
985 *val = readl(mv_ap_base(ap) + ofs);
991 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
993 unsigned int ofs = mv_scr_offset(sc_reg_in);
995 if (ofs != 0xffffffffU) {
996 writelfl(val, mv_ap_base(ap) + ofs);
1002 static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
1003 void __iomem *port_mmio)
1005 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
1007 /* set up non-NCQ EDMA configuration */
1008 cfg &= ~(1 << 9); /* disable eQue */
1010 if (IS_GEN_I(hpriv)) {
1011 cfg &= ~0x1f; /* clear queue depth */
1012 cfg |= (1 << 8); /* enab config burst size mask */
1015 else if (IS_GEN_II(hpriv)) {
1016 cfg &= ~0x1f; /* clear queue depth */
1017 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1018 cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
1021 else if (IS_GEN_IIE(hpriv)) {
1022 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1023 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1024 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
1025 cfg |= (1 << 18); /* enab early completion */
1026 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1027 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
1028 cfg &= ~(EDMA_CFG_NCQ); /* clear NCQ */
1031 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1035 * mv_port_start - Port specific init/start routine.
1036 * @ap: ATA channel to manipulate
1038 * Allocate and point to DMA memory, init port private memory,
1042 * Inherited from caller.
1044 static int mv_port_start(struct ata_port *ap)
1046 struct device *dev = ap->host->dev;
1047 struct mv_host_priv *hpriv = ap->host->private_data;
1048 struct mv_port_priv *pp;
1049 void __iomem *port_mmio = mv_ap_base(ap);
1052 unsigned long flags;
1055 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1059 mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
1063 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
1065 rc = ata_pad_alloc(ap, dev);
1069 /* First item in chunk of DMA memory:
1070 * 32-slot command request table (CRQB), 32 bytes each in size
1073 pp->crqb_dma = mem_dma;
1074 mem += MV_CRQB_Q_SZ;
1075 mem_dma += MV_CRQB_Q_SZ;
1078 * 32-slot command response table (CRPB), 8 bytes each in size
1081 pp->crpb_dma = mem_dma;
1082 mem += MV_CRPB_Q_SZ;
1083 mem_dma += MV_CRPB_Q_SZ;
1086 * Table of scatter-gather descriptors (ePRD), 16 bytes each
1089 pp->sg_tbl_dma = mem_dma;
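	/* At this point the single coherent allocation has been carved up as:
	 *   [0x000, 0x400)  CRQB ring  (pp->crqb,   pp->crqb_dma)
	 *   [0x400, 0x500)  CRPB ring  (pp->crpb,   pp->crpb_dma)
	 *   [0x500, 0x1000) ePRD table (pp->sg_tbl, pp->sg_tbl_dma)
	 * The 1KB/256B/16B alignment rules noted near the top of the file are
	 * met because dmam_alloc_coherent() returns at least page-aligned
	 * memory.
	 */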
1091 spin_lock_irqsave(&ap->host->lock, flags);
1093 mv_edma_cfg(ap, hpriv, port_mmio);
1095 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1097 spin_unlock_irqrestore(&ap->host->lock, flags);
1099 /* Don't turn on EDMA here...do it before DMA commands only. Else
1100 * we'll be unable to send non-data, PIO, etc due to restricted access
1103 ap->private_data = pp;
1108 * mv_port_stop - Port specific cleanup/stop routine.
1109 * @ap: ATA channel to manipulate
1111 * Stop DMA, cleanup port memory.
1114 * This routine uses the host lock to protect the DMA stop.
1116 static void mv_port_stop(struct ata_port *ap)
1122 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1123 * @qc: queued command whose SG list to source from
1125 * Populate the SG list and mark the last entry.
1128 * Inherited from caller.
1130 static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
1132 struct mv_port_priv *pp = qc->ap->private_data;
1133 unsigned int n_sg = 0;
1134 struct scatterlist *sg;
1135 struct mv_sg *mv_sg;
1138 ata_for_each_sg(sg, qc) {
1139 dma_addr_t addr = sg_dma_address(sg);
1140 u32 sg_len = sg_dma_len(sg);
1142 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1143 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1144 mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
1146 if (ata_sg_is_last(sg, qc))
1147 mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1156 static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1158 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1159 (last ? CRQB_CMD_LAST : 0);
1160 *cmdw = cpu_to_le16(tmp);
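/* Layout of each 16-bit CRQB command word built above, inferred from the
 * CRQB_CMD_* constants: bit 15 = last-word flag, bits 12:11 = CS field
 * (CRQB_CMD_CS), bits 10:8 = shadow register address, bits 7:0 = data byte.
 * E.g. writing 0x35 (WRITE DMA EXT) to the command register (address 7) as
 * the final word packs to 0x35 | (7 << 8) | CRQB_CMD_CS | CRQB_CMD_LAST
 * = 0x9735.
 */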
1164 * mv_qc_prep - Host specific command preparation.
1165 * @qc: queued command to prepare
1167 * This routine simply redirects to the general purpose routine
1168 * if command is not DMA. Else, it handles prep of the CRQB
1169 * (command request block), does some sanity checking, and calls
1170 * the SG load routine.
1173 * Inherited from caller.
1175 static void mv_qc_prep(struct ata_queued_cmd *qc)
1177 struct ata_port *ap = qc->ap;
1178 struct mv_port_priv *pp = ap->private_data;
1180 struct ata_taskfile *tf;
1184 if (qc->tf.protocol != ATA_PROT_DMA)
1187 /* Fill in command request block
1189 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1190 flags |= CRQB_FLAG_READ;
1191 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1192 flags |= qc->tag << CRQB_TAG_SHIFT;
1193 flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/
1195 /* get current queue index from software */
1196 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1198 pp->crqb[in_index].sg_addr =
1199 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1200 pp->crqb[in_index].sg_addr_hi =
1201 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1202 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1204 cw = &pp->crqb[in_index].ata_cmd[0];
	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
1213 switch (tf->command) {
1215 case ATA_CMD_READ_EXT:
1217 case ATA_CMD_WRITE_EXT:
1218 case ATA_CMD_WRITE_FUA_EXT:
1219 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1221 #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1222 case ATA_CMD_FPDMA_READ:
1223 case ATA_CMD_FPDMA_WRITE:
1224 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1225 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1227 #endif /* FIXME: remove this line when NCQ added */
1229 /* The only other commands EDMA supports in non-queued and
1230 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1231 * of which are defined/used by Linux. If we get here, this
1232 * driver needs work.
1234 * FIXME: modify libata to give qc_prep a return value and
1235 * return error here.
1237 BUG_ON(tf->command);
1240 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1241 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1242 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1243 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1244 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1245 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1246 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1247 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1248 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1250 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1256 * mv_qc_prep_iie - Host specific command preparation.
1257 * @qc: queued command to prepare
1259 * This routine simply redirects to the general purpose routine
1260 * if command is not DMA. Else, it handles prep of the CRQB
1261 * (command request block), does some sanity checking, and calls
1262 * the SG load routine.
1265 * Inherited from caller.
1267 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1269 struct ata_port *ap = qc->ap;
1270 struct mv_port_priv *pp = ap->private_data;
1271 struct mv_crqb_iie *crqb;
1272 struct ata_taskfile *tf;
1276 if (qc->tf.protocol != ATA_PROT_DMA)
1279 /* Fill in Gen IIE command request block
1281 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1282 flags |= CRQB_FLAG_READ;
1284 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1285 flags |= qc->tag << CRQB_TAG_SHIFT;
1286 flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really-
1287 what we use as our tag */
1289 /* get current queue index from software */
1290 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1292 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1293 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1294 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1295 crqb->flags = cpu_to_le32(flags);
1298 crqb->ata_cmd[0] = cpu_to_le32(
1299 (tf->command << 16) |
1302 crqb->ata_cmd[1] = cpu_to_le32(
1308 crqb->ata_cmd[2] = cpu_to_le32(
1309 (tf->hob_lbal << 0) |
1310 (tf->hob_lbam << 8) |
1311 (tf->hob_lbah << 16) |
1312 (tf->hob_feature << 24)
1314 crqb->ata_cmd[3] = cpu_to_le32(
1316 (tf->hob_nsect << 8)
1319 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1325 * mv_qc_issue - Initiate a command to the host
1326 * @qc: queued command to start
1328 * This routine simply redirects to the general purpose routine
1329 * if command is not DMA. Else, it sanity checks our local
1330 * caches of the request producer/consumer indices then enables
1331 * DMA and bumps the request producer index.
1334 * Inherited from caller.
1336 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1338 struct ata_port *ap = qc->ap;
1339 void __iomem *port_mmio = mv_ap_base(ap);
1340 struct mv_port_priv *pp = ap->private_data;
1341 struct mv_host_priv *hpriv = ap->host->private_data;
1344 if (qc->tf.protocol != ATA_PROT_DMA) {
1345 /* We're about to send a non-EDMA capable command to the
1346 * port. Turn off EDMA so there won't be problems accessing
1347 * shadow block, etc registers.
1350 return ata_qc_issue_prot(qc);
1353 mv_start_dma(port_mmio, hpriv, pp);
1355 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1357 /* until we do queuing, the queue should be empty at this point */
1358 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1359 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1363 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1365 /* and write the request in pointer to kick the EDMA to life */
1366 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1367 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
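	/* Summary of the EDMA command flow (descriptive only): mv_qc_prep() /
	 * mv_qc_prep_iie() build a CRQB at slot pp->req_idx, mv_qc_issue()
	 * advances req_idx and writes it into the request-queue in-pointer
	 * register, the chip fetches the CRQB, performs the transfer described
	 * by the ePRD table, and posts a CRPB; mv_intr_edma() later walks the
	 * CRPB ring and completes the matching qc.
	 */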
1373 * mv_err_intr - Handle error interrupts on the port
1374 * @ap: ATA channel to manipulate
 * @qc: affected queued command, if any (may be NULL)
1377 * In most cases, just clear the interrupt and move on. However,
1378 * some cases require an eDMA reset, which is done right before
1379 * the COMRESET in mv_phy_reset(). The SERR case requires a
1380 * clear of pending errors in the SATA SERROR register. Finally,
1381 * if the port disabled DMA, update our cached copy to match.
1384 * Inherited from caller.
1386 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1388 void __iomem *port_mmio = mv_ap_base(ap);
1389 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1390 struct mv_port_priv *pp = ap->private_data;
1391 struct mv_host_priv *hpriv = ap->host->private_data;
1392 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1393 unsigned int action = 0, err_mask = 0;
1394 struct ata_eh_info *ehi = &ap->eh_info;
1396 ata_ehi_clear_desc(ehi);
1398 if (!edma_enabled) {
1399 /* just a guess: do we need to do this? should we
1400 * expand this, and do it in all cases?
1402 sata_scr_read(ap, SCR_ERROR, &serr);
1403 sata_scr_write_flush(ap, SCR_ERROR, serr);
1406 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1408 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1411 * all generations share these EDMA error cause bits
1414 if (edma_err_cause & EDMA_ERR_DEV)
1415 err_mask |= AC_ERR_DEV;
1416 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1417 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1418 EDMA_ERR_INTRL_PAR)) {
1419 err_mask |= AC_ERR_ATA_BUS;
1420 action |= ATA_EH_HARDRESET;
1421 ata_ehi_push_desc(ehi, "parity error");
1423 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1424 ata_ehi_hotplugged(ehi);
1425 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1426 "dev disconnect" : "dev connect");
1429 if (IS_GEN_I(hpriv)) {
1430 eh_freeze_mask = EDMA_EH_FREEZE_5;
1432 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1433 struct mv_port_priv *pp = ap->private_data;
1434 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1435 ata_ehi_push_desc(ehi, "EDMA self-disable");
1438 eh_freeze_mask = EDMA_EH_FREEZE;
1440 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1441 struct mv_port_priv *pp = ap->private_data;
1442 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1443 ata_ehi_push_desc(ehi, "EDMA self-disable");
1446 if (edma_err_cause & EDMA_ERR_SERR) {
1447 sata_scr_read(ap, SCR_ERROR, &serr);
1448 sata_scr_write_flush(ap, SCR_ERROR, serr);
1449 err_mask = AC_ERR_ATA_BUS;
1450 action |= ATA_EH_HARDRESET;
1454 /* Clear EDMA now that SERR cleanup done */
1455 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1458 err_mask = AC_ERR_OTHER;
1459 action |= ATA_EH_HARDRESET;
1462 ehi->serror |= serr;
1463 ehi->action |= action;
1466 qc->err_mask |= err_mask;
1468 ehi->err_mask |= err_mask;
1470 if (edma_err_cause & eh_freeze_mask)
1471 ata_port_freeze(ap);
1476 static void mv_intr_pio(struct ata_port *ap)
1478 struct ata_queued_cmd *qc;
1481 /* ignore spurious intr if drive still BUSY */
1482 ata_status = readb(ap->ioaddr.status_addr);
1483 if (unlikely(ata_status & ATA_BUSY))
1486 /* get active ATA command */
1487 qc = ata_qc_from_tag(ap, ap->active_tag);
1488 if (unlikely(!qc)) /* no active tag */
1490 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1493 /* and finally, complete the ATA command */
1494 qc->err_mask |= ac_err_mask(ata_status);
1495 ata_qc_complete(qc);
1498 static void mv_intr_edma(struct ata_port *ap)
1500 void __iomem *port_mmio = mv_ap_base(ap);
1501 struct mv_host_priv *hpriv = ap->host->private_data;
1502 struct mv_port_priv *pp = ap->private_data;
1503 struct ata_queued_cmd *qc;
1504 u32 out_index, in_index;
1505 bool work_done = false;
1507 /* get h/w response queue pointer */
1508 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1509 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1515 /* get s/w response queue last-read pointer, and compare */
1516 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1517 if (in_index == out_index)
1520 /* 50xx: get active ATA command */
1521 if (IS_GEN_I(hpriv))
1522 tag = ap->active_tag;
1524 /* Gen II/IIE: get active ATA command via tag, to enable
1525 * support for queueing. this works transparently for
1526 * queued and non-queued modes.
1528 else if (IS_GEN_II(hpriv))
1529 tag = (le16_to_cpu(pp->crpb[out_index].id)
1530 >> CRPB_IOID_SHIFT_6) & 0x3f;
1532 else /* IS_GEN_IIE */
1533 tag = (le16_to_cpu(pp->crpb[out_index].id)
1534 >> CRPB_IOID_SHIFT_7) & 0x3f;
1536 qc = ata_qc_from_tag(ap, tag);
1538 /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
1539 * bits (WARNING: might not necessarily be associated
1540 * with this command), which -should- be clear
1543 status = le16_to_cpu(pp->crpb[out_index].flags);
1544 if (unlikely(status & 0xff)) {
1545 mv_err_intr(ap, qc);
1549 /* and finally, complete the ATA command */
1552 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1553 ata_qc_complete(qc);
1556 /* advance software response queue pointer, to
1557 * indicate (after the loop completes) to hardware
1558 * that we have consumed a response queue entry.
1565 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1566 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1567 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1571 * mv_host_intr - Handle all interrupts on the given host controller
1572 * @host: host specific structure
1573 * @relevant: port error bits relevant to this host controller
1574 * @hc: which host controller we're to look at
1576 * Read then write clear the HC interrupt status then walk each
1577 * port connected to the HC and see if it needs servicing. Port
1578 * success ints are reported in the HC interrupt status reg, the
1579 * port error ints are reported in the higher level main
1580 * interrupt status register and thus are passed in via the
1581 * 'relevant' argument.
1584 * Inherited from caller.
1586 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1588 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1589 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1596 port0 = MV_PORTS_PER_HC;
1598 /* we'll need the HC success int register in most cases */
1599 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1603 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1605 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1606 hc,relevant,hc_irq_cause);
1608 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1609 struct ata_port *ap = host->ports[port];
1610 struct mv_port_priv *pp = ap->private_data;
1611 int have_err_bits, hard_port, shift;
1613 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1616 shift = port << 1; /* (port * 2) */
1617 if (port >= MV_PORTS_PER_HC) {
1618 shift++; /* skip bit 8 in the HC Main IRQ reg */
1620 have_err_bits = ((PORT0_ERR << shift) & relevant);
1622 if (unlikely(have_err_bits)) {
1623 struct ata_queued_cmd *qc;
1625 qc = ata_qc_from_tag(ap, ap->active_tag);
1626 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1629 mv_err_intr(ap, qc);
1633 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1635 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1636 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1639 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1646 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1648 struct ata_port *ap;
1649 struct ata_queued_cmd *qc;
1650 struct ata_eh_info *ehi;
1651 unsigned int i, err_mask, printed = 0;
1654 err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
1656 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1659 DPRINTK("All regs @ PCI error\n");
1660 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1662 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1664 for (i = 0; i < host->n_ports; i++) {
1665 ap = host->ports[i];
1666 if (!ata_port_offline(ap)) {
1668 ata_ehi_clear_desc(ehi);
1670 ata_ehi_push_desc(ehi,
1671 "PCI err cause 0x%08x", err_cause);
1672 err_mask = AC_ERR_HOST_BUS;
1673 ehi->action = ATA_EH_HARDRESET;
1674 qc = ata_qc_from_tag(ap, ap->active_tag);
1676 qc->err_mask |= err_mask;
1678 ehi->err_mask |= err_mask;
1680 ata_port_freeze(ap);
1686 * mv_interrupt - Main interrupt event handler
1688 * @dev_instance: private data; in this case the host structure
1690 * Read the read only register to determine if any host
1691 * controllers have pending interrupts. If so, call lower level
1692 * routine to handle. Also check for PCI errors which are only
1696 * This routine holds the host lock while processing pending
1699 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1701 struct ata_host *host = dev_instance;
1702 unsigned int hc, handled = 0, n_hcs;
1703 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1706 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1708 /* check the cases where we either have nothing pending or have read
1709 * a bogus register value which can indicate HW removal or PCI fault
1711 if (!irq_stat || (0xffffffffU == irq_stat))
1714 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1715 spin_lock(&host->lock);
1717 if (unlikely(irq_stat & PCI_ERR)) {
1718 mv_pci_error(host, mmio);
1720 goto out_unlock; /* skip all other HC irq handling */
1723 for (hc = 0; hc < n_hcs; hc++) {
1724 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1726 mv_host_intr(host, relevant, hc);
1732 spin_unlock(&host->lock);
1734 return IRQ_RETVAL(handled);
1737 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1739 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1740 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1742 return hc_mmio + ofs;
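/* Worked example: on 50xx parts the per-port PHY/SCR registers live inside
 * the host controller block (not the port's EDMA space), at
 * (hardport + 1) * 0x100.  Port 6 (HC 1, hardport 2) therefore maps to
 * mv_hc_base(mmio, 1) + 0x300, and mv5_scr_read()/mv5_scr_write() index
 * into that block with sc_reg_in * sizeof(u32).
 */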
1745 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1749 switch (sc_reg_in) {
1753 ofs = sc_reg_in * sizeof(u32);
1762 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1764 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1765 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1766 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1768 if (ofs != 0xffffffffU) {
1769 *val = readl(addr + ofs);
1775 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1777 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1778 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1779 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1781 if (ofs != 0xffffffffU) {
1782 writelfl(val, addr + ofs);
1788 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1792 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1795 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1797 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1800 mv_reset_pci_bus(pdev, mmio);
1803 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1805 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1808 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1811 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1814 tmp = readl(phy_mmio + MV5_PHY_MODE);
1816 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1817 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1820 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1824 writel(0, mmio + MV_GPIO_PORT_CTL);
1826 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1828 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1830 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1833 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1836 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1837 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1839 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1842 tmp = readl(phy_mmio + MV5_LT_MODE);
1844 writel(tmp, phy_mmio + MV5_LT_MODE);
1846 tmp = readl(phy_mmio + MV5_PHY_CTL);
1849 writel(tmp, phy_mmio + MV5_PHY_CTL);
1852 tmp = readl(phy_mmio + MV5_PHY_MODE);
1854 tmp |= hpriv->signal[port].pre;
1855 tmp |= hpriv->signal[port].amps;
1856 writel(tmp, phy_mmio + MV5_PHY_MODE);
1861 #define ZERO(reg) writel(0, port_mmio + (reg))
1862 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1865 void __iomem *port_mmio = mv_port_base(mmio, port);
1867 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1869 mv_channel_reset(hpriv, mmio, port);
1871 ZERO(0x028); /* command */
1872 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1873 ZERO(0x004); /* timer */
1874 ZERO(0x008); /* irq err cause */
1875 ZERO(0x00c); /* irq err mask */
1876 ZERO(0x010); /* rq bah */
1877 ZERO(0x014); /* rq inp */
1878 ZERO(0x018); /* rq outp */
1879 ZERO(0x01c); /* respq bah */
1880 ZERO(0x024); /* respq outp */
1881 ZERO(0x020); /* respq inp */
1882 ZERO(0x02c); /* test control */
1883 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1887 #define ZERO(reg) writel(0, hc_mmio + (reg))
1888 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1891 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1899 tmp = readl(hc_mmio + 0x20);
1902 writel(tmp, hc_mmio + 0x20);
1906 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1909 unsigned int hc, port;
1911 for (hc = 0; hc < n_hc; hc++) {
1912 for (port = 0; port < MV_PORTS_PER_HC; port++)
1913 mv5_reset_hc_port(hpriv, mmio,
1914 (hc * MV_PORTS_PER_HC) + port);
1916 mv5_reset_one_hc(hpriv, mmio, hc);
1923 #define ZERO(reg) writel(0, mmio + (reg))
1924 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1928 tmp = readl(mmio + MV_PCI_MODE);
1930 writel(tmp, mmio + MV_PCI_MODE);
1932 ZERO(MV_PCI_DISC_TIMER);
1933 ZERO(MV_PCI_MSI_TRIGGER);
1934 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1935 ZERO(HC_MAIN_IRQ_MASK_OFS);
1936 ZERO(MV_PCI_SERR_MASK);
1937 ZERO(PCI_IRQ_CAUSE_OFS);
1938 ZERO(PCI_IRQ_MASK_OFS);
1939 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1940 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1941 ZERO(MV_PCI_ERR_ATTRIBUTE);
1942 ZERO(MV_PCI_ERR_COMMAND);
1946 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1950 mv5_reset_flash(hpriv, mmio);
1952 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1954 tmp |= (1 << 5) | (1 << 6);
1955 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1959 * mv6_reset_hc - Perform the 6xxx global soft reset
1960 * @mmio: base address of the HBA
1962 * This routine only applies to 6xxx parts.
1965 * Inherited from caller.
1967 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1970 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1974 /* Following procedure defined in PCI "main command and status
1978 writel(t | STOP_PCI_MASTER, reg);
1980 for (i = 0; i < 1000; i++) {
1983 if (PCI_MASTER_EMPTY & t) {
1987 if (!(PCI_MASTER_EMPTY & t)) {
1988 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
1996 writel(t | GLOB_SFT_RST, reg);
1999 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2001 if (!(GLOB_SFT_RST & t)) {
2002 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2007 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2010 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2013 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2015 if (GLOB_SFT_RST & t) {
2016 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2023 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2026 void __iomem *port_mmio;
2029 tmp = readl(mmio + MV_RESET_CFG);
2030 if ((tmp & (1 << 0)) == 0) {
2031 hpriv->signal[idx].amps = 0x7 << 8;
2032 hpriv->signal[idx].pre = 0x1 << 5;
2036 port_mmio = mv_port_base(mmio, idx);
2037 tmp = readl(port_mmio + PHY_MODE2);
2039 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2040 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2043 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2045 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2048 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2051 void __iomem *port_mmio = mv_port_base(mmio, port);
2053 u32 hp_flags = hpriv->hp_flags;
2055 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2057 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2060 if (fix_phy_mode2) {
2061 m2 = readl(port_mmio + PHY_MODE2);
2064 writel(m2, port_mmio + PHY_MODE2);
2068 m2 = readl(port_mmio + PHY_MODE2);
2069 m2 &= ~((1 << 16) | (1 << 31));
2070 writel(m2, port_mmio + PHY_MODE2);
2075 /* who knows what this magic does */
2076 tmp = readl(port_mmio + PHY_MODE3);
2079 writel(tmp, port_mmio + PHY_MODE3);
2081 if (fix_phy_mode4) {
2084 m4 = readl(port_mmio + PHY_MODE4);
2086 if (hp_flags & MV_HP_ERRATA_60X1B2)
2087 tmp = readl(port_mmio + 0x310);
2089 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2091 writel(m4, port_mmio + PHY_MODE4);
2093 if (hp_flags & MV_HP_ERRATA_60X1B2)
2094 writel(tmp, port_mmio + 0x310);
2097 /* Revert values of pre-emphasis and signal amps to the saved ones */
2098 m2 = readl(port_mmio + PHY_MODE2);
2100 m2 &= ~MV_M2_PREAMP_MASK;
2101 m2 |= hpriv->signal[port].amps;
2102 m2 |= hpriv->signal[port].pre;
2105 /* according to mvSata 3.6.1, some IIE values are fixed */
2106 if (IS_GEN_IIE(hpriv)) {
2111 writel(m2, port_mmio + PHY_MODE2);
2114 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2115 unsigned int port_no)
2117 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2119 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2121 if (IS_GEN_II(hpriv)) {
2122 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2123 ifctl |= (1 << 7); /* enable gen2i speed */
2124 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2125 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2128 udelay(25); /* allow reset propagation */
2130 /* Spec never mentions clearing the bit. Marvell's driver does
2131 * clear the bit, however.
2133 writelfl(0, port_mmio + EDMA_CMD_OFS);
2135 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2137 if (IS_GEN_I(hpriv))
2142 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2143 * @ap: ATA channel to manipulate
2145 * Part of this is taken from __sata_phy_reset and modified to
2146 * not sleep since this routine gets called from interrupt level.
 * Inherited from caller.  This is coded to be safe to call at
 * interrupt level, i.e. it does not sleep.
2152 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2153 unsigned long deadline)
2155 struct mv_port_priv *pp = ap->private_data;
2156 struct mv_host_priv *hpriv = ap->host->private_data;
2157 void __iomem *port_mmio = mv_ap_base(ap);
2161 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2165 u32 sstatus, serror, scontrol;
2167 mv_scr_read(ap, SCR_STATUS, &sstatus);
2168 mv_scr_read(ap, SCR_ERROR, &serror);
2169 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2170 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2171 "SCtrl 0x%08x\n", status, serror, scontrol);
2175 /* Issue COMRESET via SControl */
2177 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
2180 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
2184 sata_scr_read(ap, SCR_STATUS, &sstatus);
2185 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2189 } while (time_before(jiffies, deadline));
2191 /* work around errata */
2192 if (IS_GEN_II(hpriv) &&
2193 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2195 goto comreset_retry;
2199 u32 sstatus, serror, scontrol;
2201 mv_scr_read(ap, SCR_STATUS, &sstatus);
2202 mv_scr_read(ap, SCR_ERROR, &serror);
2203 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2204 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2205 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2209 if (ata_port_offline(ap)) {
2210 *class = ATA_DEV_NONE;
2214 /* even after SStatus reflects that device is ready,
2215 * it seems to take a while for link to be fully
2216 * established (and thus Status no longer 0x80/0x7F),
2217 * so we poll a bit for that, here.
2221 u8 drv_stat = ata_check_status(ap);
2222 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2227 if (time_after(jiffies, deadline))
2231 /* FIXME: if we passed the deadline, the following
2232 * code probably produces an invalid result
2235 /* finally, read device signature from TF registers */
2236 *class = ata_dev_try_classify(ap, 0, NULL);
2238 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2240 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2245 static int mv_prereset(struct ata_port *ap, unsigned long deadline)
2247 struct mv_port_priv *pp = ap->private_data;
2248 struct ata_eh_context *ehc = &ap->eh_context;
2251 rc = mv_stop_dma(ap);
2253 ehc->i.action |= ATA_EH_HARDRESET;
2255 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2256 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2257 ehc->i.action |= ATA_EH_HARDRESET;
2260 /* if we're about to do hardreset, nothing more to do */
2261 if (ehc->i.action & ATA_EH_HARDRESET)
2264 if (ata_port_online(ap))
2265 rc = ata_wait_ready(ap, deadline);
2272 static int mv_hardreset(struct ata_port *ap, unsigned int *class,
2273 unsigned long deadline)
2275 struct mv_host_priv *hpriv = ap->host->private_data;
2276 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2280 mv_channel_reset(hpriv, mmio, ap->port_no);
2282 mv_phy_reset(ap, class, deadline);
2287 static void mv_postreset(struct ata_port *ap, unsigned int *classes)
2291 /* print link status */
2292 sata_print_link_status(ap);
2295 sata_scr_read(ap, SCR_ERROR, &serr);
2296 sata_scr_write_flush(ap, SCR_ERROR, serr);
2298 /* bail out if no device is present */
2299 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2300 DPRINTK("EXIT, no device\n");
2304 /* set up device control */
2305 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2308 static void mv_error_handler(struct ata_port *ap)
2310 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2311 mv_hardreset, mv_postreset);
2314 static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2316 mv_stop_dma(qc->ap);
2319 static void mv_eh_freeze(struct ata_port *ap)
2321 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2322 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2326 /* FIXME: handle coalescing completion events properly */
2328 shift = ap->port_no * 2;
2332 mask = 0x3 << shift;
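/* Ports 0-3 hang off the first host controller, 4-7 off the second;
 * each port owns an (err, done) bit pair in the main IRQ mask, hence
 * the two-bit mask shifted into this port's slot.
 */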
2334 /* disable assertion of portN err, done events */
2335 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2336 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2339 static void mv_eh_thaw(struct ata_port *ap)
2341 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2342 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2343 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2344 void __iomem *port_mmio = mv_ap_base(ap);
2345 u32 tmp, mask, hc_irq_cause;
2346 unsigned int shift, hc_port_no = ap->port_no;
2348 /* FIXME: handle coalescing completion events properly */
2350 shift = ap->port_no * 2;
2356 mask = 0x3 << shift;
2358 /* clear EDMA errors on this port */
2359 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2361 /* clear pending irq events */
2362 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2363 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2364 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2365 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2367 /* enable assertion of portN err, done events */
2368 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2369 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2373 * mv_port_init - Perform some early initialization on a single port.
2374 * @port: libata data structure storing shadow register addresses
2375 * @port_mmio: base address of the port
2377 * Initialize shadow register mmio addresses, clear outstanding
2378 * interrupts on the port, and unmask interrupts for the future
2379 * start of the port.
2382 * Inherited from caller.
2384 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2386 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2389 /* PIO related setup */
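/* The taskfile (shadow) registers are exposed as 32-bit-spaced slots
 * in the port's SHD block, so each ATA_REG_* index is scaled by
 * sizeof(u32) to form its MMIO address.
 */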
2391 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2393 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2394 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2395 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2396 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2397 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2398 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2400 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2401 /* special case: control/altstatus doesn't have ATA_REG_ address */
2402 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2405 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
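/* There is no SFF BMDMA engine on this hardware (EDMA takes its
 * place), and the SCRs are reached through mv_scr_read()/mv_scr_write()
 * rather than a flat scr_addr window, so these are deliberately NULL.
 */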
2407 /* Clear any currently outstanding port interrupt conditions */
2408 serr_ofs = mv_scr_offset(SCR_ERROR);
2409 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2410 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2412 /* unmask all EDMA error interrupts */
2413 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2415 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2416 readl(port_mmio + EDMA_CFG_OFS),
2417 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2418 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2421 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2423 struct pci_dev *pdev = to_pci_dev(host->dev);
2424 struct mv_host_priv *hpriv = host->private_data;
2425 u32 hp_flags = hpriv->hp_flags;
2429 hpriv->ops = &mv5xxx_ops;
2430 hp_flags |= MV_HP_GEN_I;
2432 switch (pdev->revision) {
2434 hp_flags |= MV_HP_ERRATA_50XXB0;
2437 hp_flags |= MV_HP_ERRATA_50XXB2;
2440 dev_printk(KERN_WARNING, &pdev->dev,
2441 "Applying 50XXB2 workarounds to unknown rev\n");
2442 hp_flags |= MV_HP_ERRATA_50XXB2;
2449 hpriv->ops = &mv5xxx_ops;
2450 hp_flags |= MV_HP_GEN_I;
2452 switch (pdev->revision) {
2454 hp_flags |= MV_HP_ERRATA_50XXB0;
2457 hp_flags |= MV_HP_ERRATA_50XXB2;
2460 dev_printk(KERN_WARNING, &pdev->dev,
2461 "Applying B2 workarounds to unknown rev\n");
2462 hp_flags |= MV_HP_ERRATA_50XXB2;
2469 hpriv->ops = &mv6xxx_ops;
2470 hp_flags |= MV_HP_GEN_II;
2472 switch (pdev->revision) {
2474 hp_flags |= MV_HP_ERRATA_60X1B2;
2477 hp_flags |= MV_HP_ERRATA_60X1C0;
2480 dev_printk(KERN_WARNING, &pdev->dev,
2481 "Applying B2 workarounds to unknown rev\n");
2482 hp_flags |= MV_HP_ERRATA_60X1B2;
2489 hpriv->ops = &mv6xxx_ops;
2490 hp_flags |= MV_HP_GEN_IIE;
2492 switch (pdev->revision) {
2494 hp_flags |= MV_HP_ERRATA_XX42A0;
2497 hp_flags |= MV_HP_ERRATA_60X1C0;
2500 dev_printk(KERN_WARNING, &pdev->dev,
2501 "Applying 60X1C0 workarounds to unknown rev\n");
2502 hp_flags |= MV_HP_ERRATA_60X1C0;
2508 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2512 hpriv->hp_flags = hp_flags;
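/* hp_flags now records both the controller generation (GEN_I/II/IIE)
 * and the revision-specific errata bits that later per-port setup
 * consults.
 */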
2518 * mv_init_host - Perform some early initialization of the host.
2519 * @host: ATA host to initialize
2520 * @board_idx: controller index
2522 * If possible, do an early global reset of the host. Then do
2523 * our port init and clear/unmask all/relevant host interrupts.
2526 * Inherited from caller.
2528 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2530 int rc = 0, n_hc, port, hc;
2531 struct pci_dev *pdev = to_pci_dev(host->dev);
2532 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2533 struct mv_host_priv *hpriv = host->private_data;
2535 /* global interrupt mask */
2536 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
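/* Mask every source in the main IRQ mask up front so nothing can
 * interrupt while the host controllers and ports are still being
 * brought up; the wanted bits are re-enabled at the end of init.
 */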
2538 rc = mv_chip_id(host, board_idx);
2542 n_hc = mv_get_hc_count(host->ports[0]->flags);
2544 for (port = 0; port < host->n_ports; port++)
2545 hpriv->ops->read_preamp(hpriv, port, mmio);
2547 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2551 hpriv->ops->reset_flash(hpriv, mmio);
2552 hpriv->ops->reset_bus(pdev, mmio);
2553 hpriv->ops->enable_leds(hpriv, mmio);
2555 for (port = 0; port < host->n_ports; port++) {
2556 if (IS_GEN_II(hpriv)) {
2557 void __iomem *port_mmio = mv_port_base(mmio, port);
2559 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2560 ifctl |= (1 << 7); /* enable gen2i speed */
2561 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2562 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
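/* Bit 7 of SATA_INTERFACE_CTL selects Gen2i signalling; the rest of
 * the register is forced to the chip-spec value before the per-port
 * PHY errata fixes below are applied.
 */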
2565 hpriv->ops->phy_errata(hpriv, mmio, port);
2568 for (port = 0; port < host->n_ports; port++) {
2569 void __iomem *port_mmio = mv_port_base(mmio, port);
2570 mv_port_init(&host->ports[port]->ioaddr, port_mmio);
2573 for (hc = 0; hc < n_hc; hc++) {
2574 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2576 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2577 "(before clear)=0x%08x\n", hc,
2578 readl(hc_mmio + HC_CFG_OFS),
2579 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2581 /* Clear any currently outstanding hc interrupt conditions */
2582 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2585 /* Clear any currently outstanding host interrupt conditions */
2586 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2588 /* and unmask interrupt generation for host regs */
2589 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2591 if (IS_GEN_I(hpriv))
2592 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2594 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2596 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2597 "PCI int cause/mask=0x%08x/0x%08x\n",
2598 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2599 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2600 readl(mmio + PCI_IRQ_CAUSE_OFS),
2601 readl(mmio + PCI_IRQ_MASK_OFS));
2608 * mv_print_info - Dump key info to kernel log for perusal.
2609 * @host: ATA host to print info about
2611 * FIXME: complete this.
2614 * Inherited from caller.
2616 static void mv_print_info(struct ata_host *host)
2618 struct pci_dev *pdev = to_pci_dev(host->dev);
2619 struct mv_host_priv *hpriv = host->private_data;
2621 const char *scc_s, *gen;
2623 /* Read the PCI class byte so we can report whether the chip is
2624 * presenting itself in SCSI or RAID mode */
2626 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2629 else if (scc == 0x01)
2634 if (IS_GEN_I(hpriv))
2636 else if (IS_GEN_II(hpriv))
2638 else if (IS_GEN_IIE(hpriv))
2643 dev_printk(KERN_INFO, &pdev->dev,
2644 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2645 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2646 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2650 * mv_init_one - handle a positive probe of a Marvell host
2651 * @pdev: PCI device found
2652 * @ent: PCI device ID entry for the matched host
2655 * Inherited from caller.
2657 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2659 static int printed_version = 0;
2660 unsigned int board_idx = (unsigned int)ent->driver_data;
2661 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2662 struct ata_host *host;
2663 struct mv_host_priv *hpriv;
2666 if (!printed_version++)
2667 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
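/* Total port count = number of host controllers on the chip times the
 * fixed number of ports each one hosts.
 */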
2670 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2672 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2673 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2674 if (!host || !hpriv)
2676 host->private_data = hpriv;
2678 /* acquire resources */
2679 rc = pcim_enable_device(pdev);
2683 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2685 pcim_pin_device(pdev);
2688 host->iomap = pcim_iomap_table(pdev);
2690 rc = pci_go_64(pdev);
2694 /* initialize adapter */
2695 rc = mv_init_host(host, board_idx);
2699 /* Enable interrupts */
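/* If the 'msi' module parameter is set, PCI MSI is tried first;
 * should pci_enable_msi() fail, the driver falls back to legacy INTx
 * interrupts.
 */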
2700 if (msi && pci_enable_msi(pdev))
2703 mv_dump_pci_cfg(pdev, 0x68);
2704 mv_print_info(host);
2706 pci_set_master(pdev);
2707 pci_try_set_mwi(pdev);
2708 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2709 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2712 static int __init mv_init(void)
2714 return pci_register_driver(&mv_pci_driver);
2717 static void __exit mv_exit(void)
2719 pci_unregister_driver(&mv_pci_driver);
2722 MODULE_AUTHOR("Brett Russ");
2723 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2724 MODULE_LICENSE("GPL");
2725 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2726 MODULE_VERSION(DRV_VERSION);
2728 module_param(msi, int, 0444);
2729 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
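/* Example usage (hypothetical invocation): "modprobe sata_mv msi=1"
 * requests PCI MSI; passing msi=0 (or omitting the parameter) keeps
 * legacy INTx interrupts.
 */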
2731 module_init(mv_init);
2732 module_exit(mv_exit);