/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.81"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
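	/* For reference, the arithmetic behind MV_PORT_PRIV_DMA_SZ,
	 * using the constants above:
	 *   32 B/CRQB * 32 slots = 1024 B  (the 1KB-aligned request ring)
	 *   8 B/CRPB  * 32 slots =  256 B  (the 256B-aligned response ring)
	 *   16 B/ePRD * 176      = 2816 B  (the SG table)
	 *                  total = 4096 B == 4KB
	 * so one page of coherent DMA memory covers a port's CRQB ring,
	 * CRPB ring, and SG table (see mv_port_start below).
	 */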
	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),
	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,
	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,
	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	MV_DMA_BOUNDARY		= 0xffffffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
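/* The BASE_LO masks above follow from the alignment rules: the CRQB ring
 * is 1KB-aligned, so bits 31:10 (0xfffffc00) of the IN/OUT pointer
 * registers hold the ring base while the bits below carry the pointer
 * field; the CRPB ring is 256B-aligned, hence 0xffffff00.  A sketch of
 * how an index gets merged into such a register (see mv_set_edma_ptrs
 * below):
 *
 *   index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
 *   writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index, ...);
 */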
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};
struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv;
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};
static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
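/* A worked example of the register-window math above, for port 5 on an
 * 8-port (dual-HC) part:
 *
 *   mv_hc_from_port(5)       == 5 >> 2 == 1        (second HC)
 *   mv_hardport_from_port(5) == 5 & 3  == 1        (its second port)
 *   mv_port_base(base, 5)    == base + 0x20000     (MV_SATAHC0_REG_BASE)
 *                                    + 1 * 0x10000 (HC 1)
 *                                    + 0x2000      (arbiter)
 *                                    + 1 * 0x2000  == base + 0x34000
 */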
static void mv_irq_clear(struct ata_port *ap)
{
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
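/* The "(dma >> 16) >> 16" idiom above extracts the upper 32 bits of a
 * ring's DMA address without tripping undefined behaviour: a plain
 * ">> 32" is undefined when dma_addr_t is only 32 bits wide, while the
 * double 16-bit shift safely yields zero in that case.
 */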
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @hpriv: host private data
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
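/* __mv_stop_dma() expects the caller to already hold the host lock (the
 * qc_issue path runs under it); mv_stop_dma() is the self-locking
 * wrapper for contexts such as port stop and post-internal-command that
 * don't.
 */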
#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);

		if (ata_sg_is_last(sg, qc))
			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

		mv_sg++;
		n_sg++;
	}

	return n_sg;
}
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		  (last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
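/* For illustration, packing the command-register write that finishes a
 * CRQB (assuming ATA_REG_CMD == 7, as defined in <linux/ata.h>), with
 * tf->command == 0x25 (READ DMA EXT):
 *
 *   mv_crqb_pack_cmd(cw, 0x25, ATA_REG_CMD, 1);
 *     tmp = 0x25 | (7 << 8) | CRQB_CMD_CS | CRQB_CMD_LAST
 *         = 0x0025 | 0x0700 | 0x1000 | 0x8000 = 0x9725
 */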
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
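/* The EDMA request ring is a classic producer/consumer pair: software
 * advances the IN pointer (req_idx) as CRQBs are queued, and the EDMA
 * engine advances the OUT pointer as it consumes them.  Since only one
 * command is in flight until NCQ support lands, the WARN_ON above
 * asserts that hardware has caught up before each new issue.
 */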
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: affected queued command, or NULL if none
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(ap, SCR_ERROR, &serr);
			sata_scr_write_flush(ap, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
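/* Note that the response-side OUT pointer is written back only once,
 * after the loop has drained every completed CRPB: the write tells the
 * EDMA engine those response slots may be reused, so batching it
 * acknowledges the whole run of completions with a single MMIO access.
 */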
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_port_offline(ap)) {
			ehi = &ap->eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
/**
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to be safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	msleep(20);

	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	do {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;

		msleep(500);

		if (time_after(jiffies, deadline))
			break;
	} while (1);

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap, 0, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
static int mv_prereset(struct ata_port *ap, unsigned long deadline)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct ata_eh_context *ehc = &ap->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_port_online(ap))
		rc = ata_wait_ready(ap, deadline);
	else
		rc = -ENODEV;

	return rc;
}
static int mv_hardreset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
static void mv_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serr;

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError */
	sata_scr_read(ap, SCR_ERROR, &serr);
	sata_scr_write_flush(ap, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}

static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}

static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
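
/*
 * Illustrative sketch, not referenced by the driver: how the err/done
 * bit pair for a port is located in the main IRQ cause/mask register
 * by mv_eh_freeze()/mv_eh_thaw().  Each port owns two adjacent bits;
 * ports 4-7 shift up one extra position because a coalescing-done bit
 * sits between the two SATAHC halves (hence the FIXME above about
 * coalescing completion events).
 */
static inline u32 mv_port_irq_bits(unsigned int port_no)
{
	unsigned int shift = port_no * 2;

	if (port_no > 3)
		shift++;		/* skip the coalescing-done bit */

	return 0x3 << shift;		/* portN err | portN done */
}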

static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);		/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8));	/* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}

/**
 *	mv_port_init - Perform some early initialization on a single port.
 *	@port: libata data structure storing shadow register addresses
 *	@port_mmio: base address of the port
 *
 *	Initialize shadow register mmio addresses, clear outstanding
 *	interrupts on the port, and unmask interrupts for the future
 *	start of the port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
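
/*
 * Illustrative sketch, not referenced by the driver: every shadow
 * taskfile register initialized above follows the same rule -- the
 * block exposes the legacy ATA registers at 32-bit strides, so
 * register N lives at shd_base + 4 * N.  Only control/altstatus,
 * at SHD_CTL_AST_OFS, falls outside this pattern.
 */
static inline void __iomem *mv_shadow_reg(void __iomem *shd_base,
					  unsigned int ata_reg)
{
	return shd_base + sizeof(u32) * ata_reg;
}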

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_6042:
	case chip_7042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n",
		       board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
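
/*
 * Summary of the revision-to-errata mapping applied above:
 *
 *	board		revision	errata flag
 *	5080		0x1		MV_HP_ERRATA_50XXB0
 *	5080		0x3		MV_HP_ERRATA_50XXB2
 *	504x/508x	0x0		MV_HP_ERRATA_50XXB0
 *	504x/508x	0x3		MV_HP_ERRATA_50XXB2
 *	604x/608x	0x7		MV_HP_ERRATA_60X1B2
 *	604x/608x	0x9		MV_HP_ERRATA_60X1C0
 *	6042/7042	0x0		MV_HP_ERRATA_XX42A0
 *	6042/7042	0x1		MV_HP_ERRATA_60X1C0
 *
 * Unknown revisions fall back to the newest workaround set for that
 * family, with a warning.
 */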

/**
 *	mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *	@board_idx: controller index
 *
 *	If possible, do an early global reset of the host.  Then do
 *	our port init and clear/unmask all/relevant host interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
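
/*
 * The bring-up sequence above, in short: mask all interrupts, identify
 * the chip and its errata, sample the PHY preamp values, reset the
 * SATA host controllers, apply per-port PHY errata (forcing gen2i
 * speed on Gen-II parts), initialize each port's shadow registers,
 * then clear stale interrupt causes and unmask the interesting ones.
 */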

/**
 *	mv_print_info - Dump key info to kernel log for perusal.
 *	@host: ATA host to print info about
 *
 *	FIXME: complete this.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI sub-class code so we can report whether the
	 * chip presents itself as a plain SCSI or a RAID controller.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
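
/*
 * A typical line this produces (values hypothetical):
 *
 *	sata_mv 0000:02:01.0: Gen-II 32 slots 4 ports SCSI mode IRQ via INTx
 */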

/**
 *	mv_init_one - handle a positive probe of a Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}

static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
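
/*
 * Example (hypothetical invocation): load the driver with MSI enabled,
 * where the platform supports it:
 *
 *	modprobe sata_mv msi=1
 */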

module_init(mv_init);
module_exit(mv_exit);