2 * sata_mv.c - Marvell SATA support
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X) are still needed.
32 4) Add NCQ support (easy to intermediate, once new-EH support appears)
34 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
36 6) Add port multiplier support (intermediate)
38 8) Develop a low-power-consumption strategy, and implement it.
40 9) [Experiment, low priority] See if ATAPI can be supported using
41 "unknown FIS" or "vendor-specific FIS" support, or something creative
44 10) [Experiment, low priority] Investigate interrupt coalescing.
45 Quite often, especially with PCI Message Signalled Interrupts (MSI),
46 the overhead reduced by interrupt mitigation is not
47 worth the latency cost.
49 11) [Experiment, Marvell value added] Is it possible to use target
50 mode to cross-connect two Linux boxes with Marvell cards? If so,
51 creating LibATA target mode support would be very interesting.
53 Target mode, for those without docs, is the ability to directly
54 connect two SATA controllers.
56 13) Verify that 7042 is fully supported. I only have a 6042.
61 #include <linux/kernel.h>
62 #include <linux/module.h>
63 #include <linux/pci.h>
64 #include <linux/init.h>
65 #include <linux/blkdev.h>
66 #include <linux/delay.h>
67 #include <linux/interrupt.h>
68 #include <linux/dma-mapping.h>
69 #include <linux/device.h>
70 #include <scsi/scsi_host.h>
71 #include <scsi/scsi_cmnd.h>
72 #include <scsi/scsi_device.h>
73 #include <linux/libata.h>
75 #define DRV_NAME "sata_mv"
76 #define DRV_VERSION "1.01"
79 /* BARs are enumerated in pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
89 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
95 MV_SATAHC0_REG_BASE = 0x20000,
96 MV_FLASH_CTL = 0x1046c,
97 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
110 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
111 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
113 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
114 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
116 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
117 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
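/* For reference, a quick sanity check using the 32-deep queues and the
 * 176-entry SG table noted above: 32 * 32B (CRQB) + 32 * 8B (CRPB) +
 * 176 * 16B (ePRD) = 1024 + 256 + 2816 = 4096 bytes, i.e. exactly 4KB.
 */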
120 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
121 MV_PORT_HC_SHIFT = 2,
122 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
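/* Example: port 6 maps to HC 1 (6 >> MV_PORT_HC_SHIFT) as hard port 2
 * (6 % MV_PORTS_PER_HC, assuming the usual 4 ports per HC).
 */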
126 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
127 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
128 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
129 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
130 ATA_FLAG_PIO_POLLING,
131 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
133 CRQB_FLAG_READ = (1 << 0),
135 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
136 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
137 CRQB_CMD_ADDR_SHIFT = 8,
138 CRQB_CMD_CS = (0x2 << 11),
139 CRQB_CMD_LAST = (1 << 15),
141 CRPB_FLAG_STATUS_SHIFT = 8,
142 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
143 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
145 EPRD_FLAG_END_OF_TBL = (1 << 31),
147 /* PCI interface registers */
149 PCI_COMMAND_OFS = 0xc00,
151 PCI_MAIN_CMD_STS_OFS = 0xd30,
152 STOP_PCI_MASTER = (1 << 2),
153 PCI_MASTER_EMPTY = (1 << 3),
154 GLOB_SFT_RST = (1 << 4),
157 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
158 MV_PCI_DISC_TIMER = 0xd04,
159 MV_PCI_MSI_TRIGGER = 0xc38,
160 MV_PCI_SERR_MASK = 0xc28,
161 MV_PCI_XBAR_TMOUT = 0x1d04,
162 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
163 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
164 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
165 MV_PCI_ERR_COMMAND = 0x1d50,
167 PCI_IRQ_CAUSE_OFS = 0x1d58,
168 PCI_IRQ_MASK_OFS = 0x1d5c,
169 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
171 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
172 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
173 PORT0_ERR = (1 << 0), /* shift by port # */
174 PORT0_DONE = (1 << 1), /* shift by port # */
175 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
176 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
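/* Worked example of the layout above: port 1 (on HC0) reports its error
 * on bit 2 and its completion on bit 3; HC1's ports start at bit 9
 * because bit 8 is PORTS_0_3_COAL_DONE, and bit 17 is PORTS_4_7_COAL_DONE.
 */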
178 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
179 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
180 PORTS_0_3_COAL_DONE = (1 << 8),
181 PORTS_4_7_COAL_DONE = (1 << 17),
182 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
183 GPIO_INT = (1 << 22),
184 SELF_INT = (1 << 23),
185 TWSI_INT = (1 << 24),
186 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
187 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
188 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
189 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
191 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
194 /* SATAHC registers */
197 HC_IRQ_CAUSE_OFS = 0x14,
198 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
199 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
200 DEV_IRQ = (1 << 8), /* shift by port # */
202 /* Shadow block registers */
204 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
207 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
208 SATA_ACTIVE_OFS = 0x350,
215 SATA_INTERFACE_CTL = 0x050,
217 MV_M2_PREAMP_MASK = 0x7e0,
221 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
222 EDMA_CFG_NCQ = (1 << 5),
223 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
224 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
225 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
227 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
228 EDMA_ERR_IRQ_MASK_OFS = 0xc,
229 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
230 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
231 EDMA_ERR_DEV = (1 << 2), /* device error */
232 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
233 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
234 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
235 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
236 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
237 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
238 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
239 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
240 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
241 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
242 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
243 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
244 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
245 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
246 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
247 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
248 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
249 EDMA_ERR_OVERRUN_5 = (1 << 5),
250 EDMA_ERR_UNDERRUN_5 = (1 << 6),
251 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
261 EDMA_ERR_LNK_CTRL_RX_2 |
262 EDMA_ERR_LNK_DATA_RX |
263 EDMA_ERR_LNK_DATA_TX |
264 EDMA_ERR_TRANS_PROTO,
265 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
270 EDMA_ERR_UNDERRUN_5 |
271 EDMA_ERR_SELF_DIS_5 |
277 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
278 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
280 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
281 EDMA_REQ_Q_PTR_SHIFT = 5,
283 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
284 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
285 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
286 EDMA_RSP_Q_PTR_SHIFT = 3,
288 EDMA_CMD_OFS = 0x28, /* EDMA command register */
289 EDMA_EN = (1 << 0), /* enable EDMA */
290 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
291 ATA_RST = (1 << 2), /* reset trans/link/phy */
293 EDMA_IORDY_TMOUT = 0x34,
296 /* Host private flags (hp_flags) */
297 MV_HP_FLAG_MSI = (1 << 0),
298 MV_HP_ERRATA_50XXB0 = (1 << 1),
299 MV_HP_ERRATA_50XXB2 = (1 << 2),
300 MV_HP_ERRATA_60X1B2 = (1 << 3),
301 MV_HP_ERRATA_60X1C0 = (1 << 4),
302 MV_HP_ERRATA_XX42A0 = (1 << 5),
303 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
304 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
305 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
307 /* Port private flags (pp_flags) */
308 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
309 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
312 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
313 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
314 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
317 /* DMA boundary 0xffff is required by the s/g splitting
318 * we need on /length/ in mv_fill_sg().
320 MV_DMA_BOUNDARY = 0xffffU,
322 /* mask of register bits containing lower 32 bits
323 * of EDMA request queue DMA address
325 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
327 /* ditto, for response queue */
328 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
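/* Note that these masks mirror the alignment rules above: the request
 * queue base must be 1KB-aligned (low 10 bits masked off) and the
 * response queue base 256B-aligned (low 8 bits), which is what allows
 * the ring pointer index to share the low bits of the same registers.
 */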
341 /* Command ReQuest Block: 32B */
357 /* Command ResPonse Block: 8B */
364 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
372 struct mv_port_priv {
373 struct mv_crqb *crqb;
375 struct mv_crpb *crpb;
377 struct mv_sg *sg_tbl;
378 dma_addr_t sg_tbl_dma;
380 unsigned int req_idx;
381 unsigned int resp_idx;
386 struct mv_port_signal {
393 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
395 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
396 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
398 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
400 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
401 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
404 struct mv_host_priv {
406 struct mv_port_signal signal[8];
407 const struct mv_hw_ops *ops;
410 static void mv_irq_clear(struct ata_port *ap);
411 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
412 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
413 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
414 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
415 static int mv_port_start(struct ata_port *ap);
416 static void mv_port_stop(struct ata_port *ap);
417 static void mv_qc_prep(struct ata_queued_cmd *qc);
418 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
419 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
420 static void mv_error_handler(struct ata_port *ap);
421 static void mv_post_int_cmd(struct ata_queued_cmd *qc);
422 static void mv_eh_freeze(struct ata_port *ap);
423 static void mv_eh_thaw(struct ata_port *ap);
424 static int mv_slave_config(struct scsi_device *sdev);
425 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
427 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
429 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
430 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
432 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
434 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
435 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
437 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
439 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
440 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
442 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
444 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
445 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
446 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
447 unsigned int port_no);
449 static struct scsi_host_template mv5_sht = {
450 .module = THIS_MODULE,
452 .ioctl = ata_scsi_ioctl,
453 .queuecommand = ata_scsi_queuecmd,
454 .can_queue = ATA_DEF_QUEUE,
455 .this_id = ATA_SHT_THIS_ID,
456 .sg_tablesize = MV_MAX_SG_CT / 2,
457 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
458 .emulated = ATA_SHT_EMULATED,
460 .proc_name = DRV_NAME,
461 .dma_boundary = MV_DMA_BOUNDARY,
462 .slave_configure = mv_slave_config,
463 .slave_destroy = ata_scsi_slave_destroy,
464 .bios_param = ata_std_bios_param,
467 static struct scsi_host_template mv6_sht = {
468 .module = THIS_MODULE,
470 .ioctl = ata_scsi_ioctl,
471 .queuecommand = ata_scsi_queuecmd,
472 .can_queue = ATA_DEF_QUEUE,
473 .this_id = ATA_SHT_THIS_ID,
474 .sg_tablesize = MV_MAX_SG_CT / 2,
475 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
476 .emulated = ATA_SHT_EMULATED,
478 .proc_name = DRV_NAME,
479 .dma_boundary = MV_DMA_BOUNDARY,
480 .slave_configure = mv_slave_config,
481 .slave_destroy = ata_scsi_slave_destroy,
482 .bios_param = ata_std_bios_param,
485 static const struct ata_port_operations mv5_ops = {
486 .port_disable = ata_port_disable,
488 .tf_load = ata_tf_load,
489 .tf_read = ata_tf_read,
490 .check_status = ata_check_status,
491 .exec_command = ata_exec_command,
492 .dev_select = ata_std_dev_select,
494 .cable_detect = ata_cable_sata,
496 .qc_prep = mv_qc_prep,
497 .qc_issue = mv_qc_issue,
498 .data_xfer = ata_data_xfer,
500 .irq_clear = mv_irq_clear,
501 .irq_on = ata_irq_on,
502 .irq_ack = ata_irq_ack,
504 .error_handler = mv_error_handler,
505 .post_internal_cmd = mv_post_int_cmd,
506 .freeze = mv_eh_freeze,
509 .scr_read = mv5_scr_read,
510 .scr_write = mv5_scr_write,
512 .port_start = mv_port_start,
513 .port_stop = mv_port_stop,
516 static const struct ata_port_operations mv6_ops = {
517 .port_disable = ata_port_disable,
519 .tf_load = ata_tf_load,
520 .tf_read = ata_tf_read,
521 .check_status = ata_check_status,
522 .exec_command = ata_exec_command,
523 .dev_select = ata_std_dev_select,
525 .cable_detect = ata_cable_sata,
527 .qc_prep = mv_qc_prep,
528 .qc_issue = mv_qc_issue,
529 .data_xfer = ata_data_xfer,
531 .irq_clear = mv_irq_clear,
532 .irq_on = ata_irq_on,
533 .irq_ack = ata_irq_ack,
535 .error_handler = mv_error_handler,
536 .post_internal_cmd = mv_post_int_cmd,
537 .freeze = mv_eh_freeze,
540 .scr_read = mv_scr_read,
541 .scr_write = mv_scr_write,
543 .port_start = mv_port_start,
544 .port_stop = mv_port_stop,
547 static const struct ata_port_operations mv_iie_ops = {
548 .port_disable = ata_port_disable,
550 .tf_load = ata_tf_load,
551 .tf_read = ata_tf_read,
552 .check_status = ata_check_status,
553 .exec_command = ata_exec_command,
554 .dev_select = ata_std_dev_select,
556 .cable_detect = ata_cable_sata,
558 .qc_prep = mv_qc_prep_iie,
559 .qc_issue = mv_qc_issue,
560 .data_xfer = ata_data_xfer,
562 .irq_clear = mv_irq_clear,
563 .irq_on = ata_irq_on,
564 .irq_ack = ata_irq_ack,
566 .error_handler = mv_error_handler,
567 .post_internal_cmd = mv_post_int_cmd,
568 .freeze = mv_eh_freeze,
571 .scr_read = mv_scr_read,
572 .scr_write = mv_scr_write,
574 .port_start = mv_port_start,
575 .port_stop = mv_port_stop,
578 static const struct ata_port_info mv_port_info[] = {
580 .flags = MV_COMMON_FLAGS,
581 .pio_mask = 0x1f, /* pio0-4 */
582 .udma_mask = ATA_UDMA6,
583 .port_ops = &mv5_ops,
586 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
587 .pio_mask = 0x1f, /* pio0-4 */
588 .udma_mask = ATA_UDMA6,
589 .port_ops = &mv5_ops,
592 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
593 .pio_mask = 0x1f, /* pio0-4 */
594 .udma_mask = ATA_UDMA6,
595 .port_ops = &mv5_ops,
598 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
599 .pio_mask = 0x1f, /* pio0-4 */
600 .udma_mask = ATA_UDMA6,
601 .port_ops = &mv6_ops,
604 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
606 .pio_mask = 0x1f, /* pio0-4 */
607 .udma_mask = ATA_UDMA6,
608 .port_ops = &mv6_ops,
611 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
612 .pio_mask = 0x1f, /* pio0-4 */
613 .udma_mask = ATA_UDMA6,
614 .port_ops = &mv_iie_ops,
617 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
618 .pio_mask = 0x1f, /* pio0-4 */
619 .udma_mask = ATA_UDMA6,
620 .port_ops = &mv_iie_ops,
624 static const struct pci_device_id mv_pci_tbl[] = {
625 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
626 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
627 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
628 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
629 /* RocketRAID 1740/174x have different identifiers */
630 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
631 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
633 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
634 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
635 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
636 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
637 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
639 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
642 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
644 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
646 /* add Marvell 7042 support */
647 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
649 { } /* terminate list */
652 static struct pci_driver mv_pci_driver = {
654 .id_table = mv_pci_tbl,
655 .probe = mv_init_one,
656 .remove = ata_pci_remove_one,
659 static const struct mv_hw_ops mv5xxx_ops = {
660 .phy_errata = mv5_phy_errata,
661 .enable_leds = mv5_enable_leds,
662 .read_preamp = mv5_read_preamp,
663 .reset_hc = mv5_reset_hc,
664 .reset_flash = mv5_reset_flash,
665 .reset_bus = mv5_reset_bus,
668 static const struct mv_hw_ops mv6xxx_ops = {
669 .phy_errata = mv6_phy_errata,
670 .enable_leds = mv6_enable_leds,
671 .read_preamp = mv6_read_preamp,
672 .reset_hc = mv6_reset_hc,
673 .reset_flash = mv6_reset_flash,
674 .reset_bus = mv_reset_pci_bus,
680 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
683 /* move to PCI layer or libata core? */
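/* Standard DMA-mask fallback ladder: try a 64-bit streaming mask first
 * and, if the consistent (coherent) mask cannot also be raised to
 * 64 bits, keep it at 32 bits; if even the 64-bit streaming mask fails,
 * fall back to 32 bits for both.
 */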
684 static int pci_go_64(struct pci_dev *pdev)
688 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
689 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
691 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
693 dev_printk(KERN_ERR, &pdev->dev,
694 "64-bit DMA enable failed\n");
699 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
701 dev_printk(KERN_ERR, &pdev->dev,
702 "32-bit DMA enable failed\n");
705 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
707 dev_printk(KERN_ERR, &pdev->dev,
708 "32-bit consistent DMA enable failed\n");
720 static inline void writelfl(unsigned long data, void __iomem *addr)
723 (void) readl(addr); /* flush to avoid PCI posted write */
726 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
728 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
731 static inline unsigned int mv_hc_from_port(unsigned int port)
733 return port >> MV_PORT_HC_SHIFT;
736 static inline unsigned int mv_hardport_from_port(unsigned int port)
738 return port & MV_PORT_MASK;
741 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
744 return mv_hc_base(base, mv_hc_from_port(port));
747 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
749 return mv_hc_base_from_port(base, port) +
750 MV_SATAHC_ARBTR_REG_SZ +
751 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
754 static inline void __iomem *mv_ap_base(struct ata_port *ap)
756 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
759 static inline int mv_get_hc_count(unsigned long port_flags)
761 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
764 static void mv_irq_clear(struct ata_port *ap)
768 static int mv_slave_config(struct scsi_device *sdev)
770 int rc = ata_scsi_slave_config(sdev);
774 blk_queue_max_phys_segments(sdev->request_queue, MV_MAX_SG_CT / 2);
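/* The segment limit is halved relative to MV_MAX_SG_CT because
 * mv_fill_sg() (below) may have to split a single scatterlist segment
 * into two ePRDs; see also the MV_DMA_BOUNDARY comment above.
 */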
776 return 0; /* scsi layer doesn't check return value, sigh */
779 static void mv_set_edma_ptrs(void __iomem *port_mmio,
780 struct mv_host_priv *hpriv,
781 struct mv_port_priv *pp)
786 * initialize request queue
788 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
790 WARN_ON(pp->crqb_dma & 0x3ff);
791 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
792 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
793 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
795 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
796 writelfl((pp->crqb_dma & 0xffffffff) | index,
797 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
799 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
802 * initialize response queue
804 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
806 WARN_ON(pp->crpb_dma & 0xff);
807 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
809 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
810 writelfl((pp->crpb_dma & 0xffffffff) | index,
811 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
813 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
815 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
816 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
820 * mv_start_dma - Enable eDMA engine
821 * @base: port base address
822 * @pp: port private data
824 * Verify the local cache of the eDMA state is accurate with a
828 * Inherited from caller.
830 static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
831 struct mv_port_priv *pp)
833 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
834 /* clear EDMA event indicators, if any */
835 writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);
837 mv_set_edma_ptrs(base, hpriv, pp);
839 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
840 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
842 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
846 * __mv_stop_dma - Disable eDMA engine
847 * @ap: ATA channel to manipulate
849 * Verify the local cache of the eDMA state is accurate with a
853 * Inherited from caller.
855 static int __mv_stop_dma(struct ata_port *ap)
857 void __iomem *port_mmio = mv_ap_base(ap);
858 struct mv_port_priv *pp = ap->private_data;
862 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
863 /* Disable EDMA if active. The disable bit auto clears.
865 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
866 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
868 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
871 /* now properly wait for the eDMA to stop */
872 for (i = 1000; i > 0; i--) {
873 reg = readl(port_mmio + EDMA_CMD_OFS);
874 if (!(reg & EDMA_EN))
881 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
888 static int mv_stop_dma(struct ata_port *ap)
893 spin_lock_irqsave(&ap->host->lock, flags);
894 rc = __mv_stop_dma(ap);
895 spin_unlock_irqrestore(&ap->host->lock, flags);
901 static void mv_dump_mem(void __iomem *start, unsigned bytes)
904 for (b = 0; b < bytes; ) {
905 DPRINTK("%p: ", start + b);
906 for (w = 0; b < bytes && w < 4; w++) {
907 printk("%08x ",readl(start + b));
915 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
920 for (b = 0; b < bytes; ) {
921 DPRINTK("%02x: ", b);
922 for (w = 0; b < bytes && w < 4; w++) {
923 (void) pci_read_config_dword(pdev,b,&dw);
931 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
932 struct pci_dev *pdev)
935 void __iomem *hc_base = mv_hc_base(mmio_base,
936 port >> MV_PORT_HC_SHIFT);
937 void __iomem *port_base;
938 int start_port, num_ports, p, start_hc, num_hcs, hc;
941 start_hc = start_port = 0;
942 num_ports = 8; /* shld be benign for 4 port devs */
945 start_hc = port >> MV_PORT_HC_SHIFT;
947 num_ports = num_hcs = 1;
949 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
950 num_ports > 1 ? num_ports - 1 : start_port);
953 DPRINTK("PCI config space regs:\n");
954 mv_dump_pci_cfg(pdev, 0x68);
956 DPRINTK("PCI regs:\n");
957 mv_dump_mem(mmio_base+0xc00, 0x3c);
958 mv_dump_mem(mmio_base+0xd00, 0x34);
959 mv_dump_mem(mmio_base+0xf00, 0x4);
960 mv_dump_mem(mmio_base+0x1d00, 0x6c);
961 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
962 hc_base = mv_hc_base(mmio_base, hc);
963 DPRINTK("HC regs (HC %i):\n", hc);
964 mv_dump_mem(hc_base, 0x1c);
966 for (p = start_port; p < start_port + num_ports; p++) {
967 port_base = mv_port_base(mmio_base, p);
968 DPRINTK("EDMA regs (port %i):\n",p);
969 mv_dump_mem(port_base, 0x54);
970 DPRINTK("SATA regs (port %i):\n",p);
971 mv_dump_mem(port_base+0x300, 0x60);
976 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
984 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
987 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
996 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
998 unsigned int ofs = mv_scr_offset(sc_reg_in);
1000 if (ofs != 0xffffffffU) {
1001 *val = readl(mv_ap_base(ap) + ofs);
1007 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1009 unsigned int ofs = mv_scr_offset(sc_reg_in);
1011 if (ofs != 0xffffffffU) {
1012 writelfl(val, mv_ap_base(ap) + ofs);
1018 static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
1019 void __iomem *port_mmio)
1021 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
1023 /* set up non-NCQ EDMA configuration */
1024 cfg &= ~(1 << 9); /* disable eQue */
1026 if (IS_GEN_I(hpriv)) {
1027 cfg &= ~0x1f; /* clear queue depth */
1028 cfg |= (1 << 8); /* enab config burst size mask */
1031 else if (IS_GEN_II(hpriv)) {
1032 cfg &= ~0x1f; /* clear queue depth */
1033 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1034 cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
1037 else if (IS_GEN_IIE(hpriv)) {
1038 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1039 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1040 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
1041 cfg |= (1 << 18); /* enab early completion */
1042 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1043 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
1044 cfg &= ~(EDMA_CFG_NCQ); /* clear NCQ */
1047 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1051 * mv_port_start - Port specific init/start routine.
1052 * @ap: ATA channel to manipulate
1054 * Allocate and point to DMA memory, init port private memory,
1058 * Inherited from caller.
1060 static int mv_port_start(struct ata_port *ap)
1062 struct device *dev = ap->host->dev;
1063 struct mv_host_priv *hpriv = ap->host->private_data;
1064 struct mv_port_priv *pp;
1065 void __iomem *port_mmio = mv_ap_base(ap);
1068 unsigned long flags;
1071 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1075 mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
1079 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
1081 rc = ata_pad_alloc(ap, dev);
1085 /* First item in chunk of DMA memory:
1086 * 32-slot command request table (CRQB), 32 bytes each in size
1089 pp->crqb_dma = mem_dma;
1090 mem += MV_CRQB_Q_SZ;
1091 mem_dma += MV_CRQB_Q_SZ;
1094 * 32-slot command response table (CRPB), 8 bytes each in size
1097 pp->crpb_dma = mem_dma;
1098 mem += MV_CRPB_Q_SZ;
1099 mem_dma += MV_CRPB_Q_SZ;
1102 * Table of scatter-gather descriptors (ePRD), 16 bytes each
1105 pp->sg_tbl_dma = mem_dma;
1107 spin_lock_irqsave(&ap->host->lock, flags);
1109 mv_edma_cfg(ap, hpriv, port_mmio);
1111 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1113 spin_unlock_irqrestore(&ap->host->lock, flags);
1115 /* Don't turn on EDMA here...do it before DMA commands only. Else
1116 * we'll be unable to send non-data, PIO, etc due to restricted access
1119 ap->private_data = pp;
1124 * mv_port_stop - Port specific cleanup/stop routine.
1125 * @ap: ATA channel to manipulate
1127 * Stop DMA, cleanup port memory.
1130 * This routine uses the host lock to protect the DMA stop.
1132 static void mv_port_stop(struct ata_port *ap)
1138 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1139 * @qc: queued command whose SG list to source from
1141 * Populate the SG list and mark the last entry.
1144 * Inherited from caller.
1146 static void mv_fill_sg(struct ata_queued_cmd *qc)
1148 struct mv_port_priv *pp = qc->ap->private_data;
1149 struct scatterlist *sg;
1150 struct mv_sg *mv_sg;
1153 ata_for_each_sg(sg, qc) {
1154 dma_addr_t addr = sg_dma_address(sg);
1155 u32 sg_len = sg_dma_len(sg);
1158 u32 offset = addr & 0xffff;
1161 if (offset + sg_len > 0x10000)
1162 len = 0x10000 - offset;
1164 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1165 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1166 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1171 if (!sg_len && ata_sg_is_last(sg, qc))
1172 mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1180 static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1182 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1183 (last ? CRQB_CMD_LAST : 0);
1184 *cmdw = cpu_to_le16(tmp);
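/* Each resulting 16-bit word carries the register value in bits 7:0,
 * the shadow register address starting at bit CRQB_CMD_ADDR_SHIFT, the
 * fixed CRQB_CMD_CS marker, and CRQB_CMD_LAST on the final word of the
 * sequence; mv_qc_prep() below emits one such word per taskfile register.
 */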
1188 * mv_qc_prep - Host specific command preparation.
1189 * @qc: queued command to prepare
1191 * This routine simply redirects to the general purpose routine
1192 * if command is not DMA. Else, it handles prep of the CRQB
1193 * (command request block), does some sanity checking, and calls
1194 * the SG load routine.
1197 * Inherited from caller.
1199 static void mv_qc_prep(struct ata_queued_cmd *qc)
1201 struct ata_port *ap = qc->ap;
1202 struct mv_port_priv *pp = ap->private_data;
1204 struct ata_taskfile *tf;
1208 if (qc->tf.protocol != ATA_PROT_DMA)
1211 /* Fill in command request block
1213 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1214 flags |= CRQB_FLAG_READ;
1215 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1216 flags |= qc->tag << CRQB_TAG_SHIFT;
1217 flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/
1219 /* get current queue index from software */
1220 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1222 pp->crqb[in_index].sg_addr =
1223 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1224 pp->crqb[in_index].sg_addr_hi =
1225 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1226 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1228 cw = &pp->crqb[in_index].ata_cmd[0];
1231 /* Sadly, the CRQB cannot accommodate all registers--there are
1232 * only 11 bytes...so we must pick and choose required
1233 * registers based on the command. So, we drop feature and
1234 * hob_feature for [RW] DMA commands, but they are needed for
1235 * NCQ. NCQ will drop hob_nsect.
1237 switch (tf->command) {
1239 case ATA_CMD_READ_EXT:
1241 case ATA_CMD_WRITE_EXT:
1242 case ATA_CMD_WRITE_FUA_EXT:
1243 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1245 #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1246 case ATA_CMD_FPDMA_READ:
1247 case ATA_CMD_FPDMA_WRITE:
1248 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1249 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1251 #endif /* FIXME: remove this line when NCQ added */
1253 /* The only other commands EDMA supports in non-queued and
1254 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1255 * of which are defined/used by Linux. If we get here, this
1256 * driver needs work.
1258 * FIXME: modify libata to give qc_prep a return value and
1259 * return error here.
1261 BUG_ON(tf->command);
1264 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1265 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1266 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1267 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1268 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1269 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1270 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1271 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1272 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1274 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1280 * mv_qc_prep_iie - Host specific command preparation.
1281 * @qc: queued command to prepare
1283 * This routine simply redirects to the general purpose routine
1284 * if command is not DMA. Else, it handles prep of the CRQB
1285 * (command request block), does some sanity checking, and calls
1286 * the SG load routine.
1289 * Inherited from caller.
1291 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1293 struct ata_port *ap = qc->ap;
1294 struct mv_port_priv *pp = ap->private_data;
1295 struct mv_crqb_iie *crqb;
1296 struct ata_taskfile *tf;
1300 if (qc->tf.protocol != ATA_PROT_DMA)
1303 /* Fill in Gen IIE command request block
1305 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1306 flags |= CRQB_FLAG_READ;
1308 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1309 flags |= qc->tag << CRQB_TAG_SHIFT;
1310 flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really-
1311 what we use as our tag */
1313 /* get current queue index from software */
1314 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1316 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1317 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1318 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1319 crqb->flags = cpu_to_le32(flags);
1322 crqb->ata_cmd[0] = cpu_to_le32(
1323 (tf->command << 16) |
1326 crqb->ata_cmd[1] = cpu_to_le32(
1332 crqb->ata_cmd[2] = cpu_to_le32(
1333 (tf->hob_lbal << 0) |
1334 (tf->hob_lbam << 8) |
1335 (tf->hob_lbah << 16) |
1336 (tf->hob_feature << 24)
1338 crqb->ata_cmd[3] = cpu_to_le32(
1340 (tf->hob_nsect << 8)
1343 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1349 * mv_qc_issue - Initiate a command to the host
1350 * @qc: queued command to start
1352 * This routine simply redirects to the general purpose routine
1353 * if command is not DMA. Else, it sanity checks our local
1354 * caches of the request producer/consumer indices then enables
1355 * DMA and bumps the request producer index.
1358 * Inherited from caller.
1360 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1362 struct ata_port *ap = qc->ap;
1363 void __iomem *port_mmio = mv_ap_base(ap);
1364 struct mv_port_priv *pp = ap->private_data;
1365 struct mv_host_priv *hpriv = ap->host->private_data;
1368 if (qc->tf.protocol != ATA_PROT_DMA) {
1369 /* We're about to send a non-EDMA capable command to the
1370 * port. Turn off EDMA so there won't be problems accessing
1371 * shadow block, etc registers.
1374 return ata_qc_issue_prot(qc);
1377 mv_start_dma(port_mmio, hpriv, pp);
1379 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1381 /* until we do queuing, the queue should be empty at this point */
1382 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1383 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1387 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1389 /* and write the request in pointer to kick the EDMA to life */
1390 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1391 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1397 * mv_err_intr - Handle error interrupts on the port
1398 * @ap: ATA channel to manipulate
1399 * @qc: affected command, if any (may be NULL)
1401 * In most cases, just clear the interrupt and move on. However,
1402 * some cases require an eDMA reset, which is done right before
1403 * the COMRESET in mv_phy_reset(). The SERR case requires a
1404 * clear of pending errors in the SATA SERROR register. Finally,
1405 * if the port disabled DMA, update our cached copy to match.
1408 * Inherited from caller.
1410 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1412 void __iomem *port_mmio = mv_ap_base(ap);
1413 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1414 struct mv_port_priv *pp = ap->private_data;
1415 struct mv_host_priv *hpriv = ap->host->private_data;
1416 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1417 unsigned int action = 0, err_mask = 0;
1418 struct ata_eh_info *ehi = &ap->eh_info;
1420 ata_ehi_clear_desc(ehi);
1422 if (!edma_enabled) {
1423 /* just a guess: do we need to do this? should we
1424 * expand this, and do it in all cases?
1426 sata_scr_read(ap, SCR_ERROR, &serr);
1427 sata_scr_write_flush(ap, SCR_ERROR, serr);
1430 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1432 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1435 * all generations share these EDMA error cause bits
1438 if (edma_err_cause & EDMA_ERR_DEV)
1439 err_mask |= AC_ERR_DEV;
1440 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1441 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1442 EDMA_ERR_INTRL_PAR)) {
1443 err_mask |= AC_ERR_ATA_BUS;
1444 action |= ATA_EH_HARDRESET;
1445 ata_ehi_push_desc(ehi, "parity error");
1447 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1448 ata_ehi_hotplugged(ehi);
1449 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1450 "dev disconnect" : "dev connect");
1453 if (IS_GEN_I(hpriv)) {
1454 eh_freeze_mask = EDMA_EH_FREEZE_5;
1456 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1457 struct mv_port_priv *pp = ap->private_data;
1458 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1459 ata_ehi_push_desc(ehi, "EDMA self-disable");
1462 eh_freeze_mask = EDMA_EH_FREEZE;
1464 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1465 struct mv_port_priv *pp = ap->private_data;
1466 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1467 ata_ehi_push_desc(ehi, "EDMA self-disable");
1470 if (edma_err_cause & EDMA_ERR_SERR) {
1471 sata_scr_read(ap, SCR_ERROR, &serr);
1472 sata_scr_write_flush(ap, SCR_ERROR, serr);
1473 err_mask = AC_ERR_ATA_BUS;
1474 action |= ATA_EH_HARDRESET;
1478 /* Clear EDMA now that SERR cleanup done */
1479 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1482 err_mask = AC_ERR_OTHER;
1483 action |= ATA_EH_HARDRESET;
1486 ehi->serror |= serr;
1487 ehi->action |= action;
1490 qc->err_mask |= err_mask;
1492 ehi->err_mask |= err_mask;
1494 if (edma_err_cause & eh_freeze_mask)
1495 ata_port_freeze(ap);
1500 static void mv_intr_pio(struct ata_port *ap)
1502 struct ata_queued_cmd *qc;
1505 /* ignore spurious intr if drive still BUSY */
1506 ata_status = readb(ap->ioaddr.status_addr);
1507 if (unlikely(ata_status & ATA_BUSY))
1510 /* get active ATA command */
1511 qc = ata_qc_from_tag(ap, ap->active_tag);
1512 if (unlikely(!qc)) /* no active tag */
1514 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1517 /* and finally, complete the ATA command */
1518 qc->err_mask |= ac_err_mask(ata_status);
1519 ata_qc_complete(qc);
1522 static void mv_intr_edma(struct ata_port *ap)
1524 void __iomem *port_mmio = mv_ap_base(ap);
1525 struct mv_host_priv *hpriv = ap->host->private_data;
1526 struct mv_port_priv *pp = ap->private_data;
1527 struct ata_queued_cmd *qc;
1528 u32 out_index, in_index;
1529 bool work_done = false;
1531 /* get h/w response queue pointer */
1532 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1533 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1539 /* get s/w response queue last-read pointer, and compare */
1540 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1541 if (in_index == out_index)
1544 /* 50xx: get active ATA command */
1545 if (IS_GEN_I(hpriv))
1546 tag = ap->active_tag;
1548 /* Gen II/IIE: get active ATA command via tag, to enable
1549 * support for queueing. this works transparently for
1550 * queued and non-queued modes.
1552 else if (IS_GEN_II(hpriv))
1553 tag = (le16_to_cpu(pp->crpb[out_index].id)
1554 >> CRPB_IOID_SHIFT_6) & 0x3f;
1556 else /* IS_GEN_IIE */
1557 tag = (le16_to_cpu(pp->crpb[out_index].id)
1558 >> CRPB_IOID_SHIFT_7) & 0x3f;
1560 qc = ata_qc_from_tag(ap, tag);
1562 /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
1563 * bits (WARNING: might not necessarily be associated
1564 * with this command), which -should- be clear
1567 status = le16_to_cpu(pp->crpb[out_index].flags);
1568 if (unlikely(status & 0xff)) {
1569 mv_err_intr(ap, qc);
1573 /* and finally, complete the ATA command */
1576 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1577 ata_qc_complete(qc);
1580 /* advance software response queue pointer, to
1581 * indicate (after the loop completes) to hardware
1582 * that we have consumed a response queue entry.
1589 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1590 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1591 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1595 * mv_host_intr - Handle all interrupts on the given host controller
1596 * @host: host specific structure
1597 * @relevant: port error bits relevant to this host controller
1598 * @hc: which host controller we're to look at
1600 * Read then write clear the HC interrupt status then walk each
1601 * port connected to the HC and see if it needs servicing. Port
1602 * success ints are reported in the HC interrupt status reg, the
1603 * port error ints are reported in the higher level main
1604 * interrupt status register and thus are passed in via the
1605 * 'relevant' argument.
1608 * Inherited from caller.
1610 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1612 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1613 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1620 port0 = MV_PORTS_PER_HC;
1622 /* we'll need the HC success int register in most cases */
1623 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1627 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1629 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1630 hc,relevant,hc_irq_cause);
1632 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1633 struct ata_port *ap = host->ports[port];
1634 struct mv_port_priv *pp = ap->private_data;
1635 int have_err_bits, hard_port, shift;
1637 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1640 shift = port << 1; /* (port * 2) */
1641 if (port >= MV_PORTS_PER_HC) {
1642 shift++; /* skip bit 8 in the HC Main IRQ reg */
1644 have_err_bits = ((PORT0_ERR << shift) & relevant);
1646 if (unlikely(have_err_bits)) {
1647 struct ata_queued_cmd *qc;
1649 qc = ata_qc_from_tag(ap, ap->active_tag);
1650 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1653 mv_err_intr(ap, qc);
1657 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1659 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1660 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1663 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1670 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1672 struct ata_port *ap;
1673 struct ata_queued_cmd *qc;
1674 struct ata_eh_info *ehi;
1675 unsigned int i, err_mask, printed = 0;
1678 err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
1680 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1683 DPRINTK("All regs @ PCI error\n");
1684 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1686 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1688 for (i = 0; i < host->n_ports; i++) {
1689 ap = host->ports[i];
1690 if (!ata_port_offline(ap)) {
1692 ata_ehi_clear_desc(ehi);
1694 ata_ehi_push_desc(ehi,
1695 "PCI err cause 0x%08x", err_cause);
1696 err_mask = AC_ERR_HOST_BUS;
1697 ehi->action = ATA_EH_HARDRESET;
1698 qc = ata_qc_from_tag(ap, ap->active_tag);
1700 qc->err_mask |= err_mask;
1702 ehi->err_mask |= err_mask;
1704 ata_port_freeze(ap);
1710 * mv_interrupt - Main interrupt event handler
1712 * @dev_instance: private data; in this case the host structure
1714 * Read the read only register to determine if any host
1715 * controllers have pending interrupts. If so, call lower level
1716 * routine to handle. Also check for PCI errors which are only
1720 * This routine holds the host lock while processing pending
1723 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1725 struct ata_host *host = dev_instance;
1726 unsigned int hc, handled = 0, n_hcs;
1727 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1730 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1732 /* check the cases where we either have nothing pending or have read
1733 * a bogus register value which can indicate HW removal or PCI fault
1735 if (!irq_stat || (0xffffffffU == irq_stat))
1738 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1739 spin_lock(&host->lock);
1741 if (unlikely(irq_stat & PCI_ERR)) {
1742 mv_pci_error(host, mmio);
1744 goto out_unlock; /* skip all other HC irq handling */
1747 for (hc = 0; hc < n_hcs; hc++) {
1748 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1750 mv_host_intr(host, relevant, hc);
1756 spin_unlock(&host->lock);
1758 return IRQ_RETVAL(handled);
1761 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1763 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1764 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1766 return hc_mmio + ofs;
1769 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1773 switch (sc_reg_in) {
1777 ofs = sc_reg_in * sizeof(u32);
1786 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1788 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1789 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1790 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1792 if (ofs != 0xffffffffU) {
1793 *val = readl(addr + ofs);
1799 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1801 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1802 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1803 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1805 if (ofs != 0xffffffffU) {
1806 writelfl(val, addr + ofs);
1812 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1816 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1819 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1821 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1824 mv_reset_pci_bus(pdev, mmio);
1827 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1829 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1832 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1835 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1838 tmp = readl(phy_mmio + MV5_PHY_MODE);
1840 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1841 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1844 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1848 writel(0, mmio + MV_GPIO_PORT_CTL);
1850 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1852 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1854 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1857 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1860 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1861 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1863 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1866 tmp = readl(phy_mmio + MV5_LT_MODE);
1868 writel(tmp, phy_mmio + MV5_LT_MODE);
1870 tmp = readl(phy_mmio + MV5_PHY_CTL);
1873 writel(tmp, phy_mmio + MV5_PHY_CTL);
1876 tmp = readl(phy_mmio + MV5_PHY_MODE);
1878 tmp |= hpriv->signal[port].pre;
1879 tmp |= hpriv->signal[port].amps;
1880 writel(tmp, phy_mmio + MV5_PHY_MODE);
1885 #define ZERO(reg) writel(0, port_mmio + (reg))
1886 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1889 void __iomem *port_mmio = mv_port_base(mmio, port);
1891 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1893 mv_channel_reset(hpriv, mmio, port);
1895 ZERO(0x028); /* command */
1896 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1897 ZERO(0x004); /* timer */
1898 ZERO(0x008); /* irq err cause */
1899 ZERO(0x00c); /* irq err mask */
1900 ZERO(0x010); /* rq bah */
1901 ZERO(0x014); /* rq inp */
1902 ZERO(0x018); /* rq outp */
1903 ZERO(0x01c); /* respq bah */
1904 ZERO(0x024); /* respq outp */
1905 ZERO(0x020); /* respq inp */
1906 ZERO(0x02c); /* test control */
1907 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1911 #define ZERO(reg) writel(0, hc_mmio + (reg))
1912 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1915 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1923 tmp = readl(hc_mmio + 0x20);
1926 writel(tmp, hc_mmio + 0x20);
1930 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1933 unsigned int hc, port;
1935 for (hc = 0; hc < n_hc; hc++) {
1936 for (port = 0; port < MV_PORTS_PER_HC; port++)
1937 mv5_reset_hc_port(hpriv, mmio,
1938 (hc * MV_PORTS_PER_HC) + port);
1940 mv5_reset_one_hc(hpriv, mmio, hc);
1947 #define ZERO(reg) writel(0, mmio + (reg))
1948 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1952 tmp = readl(mmio + MV_PCI_MODE);
1954 writel(tmp, mmio + MV_PCI_MODE);
1956 ZERO(MV_PCI_DISC_TIMER);
1957 ZERO(MV_PCI_MSI_TRIGGER);
1958 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1959 ZERO(HC_MAIN_IRQ_MASK_OFS);
1960 ZERO(MV_PCI_SERR_MASK);
1961 ZERO(PCI_IRQ_CAUSE_OFS);
1962 ZERO(PCI_IRQ_MASK_OFS);
1963 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1964 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1965 ZERO(MV_PCI_ERR_ATTRIBUTE);
1966 ZERO(MV_PCI_ERR_COMMAND);
1970 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1974 mv5_reset_flash(hpriv, mmio);
1976 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1978 tmp |= (1 << 5) | (1 << 6);
1979 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1983 * mv6_reset_hc - Perform the 6xxx global soft reset
1984 * @mmio: base address of the HBA
1986 * This routine only applies to 6xxx parts.
1989 * Inherited from caller.
1991 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1994 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1998 /* Following procedure defined in PCI "main command and status
2002 writel(t | STOP_PCI_MASTER, reg);
2004 for (i = 0; i < 1000; i++) {
2007 if (PCI_MASTER_EMPTY & t) {
2011 if (!(PCI_MASTER_EMPTY & t)) {
2012 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2020 writel(t | GLOB_SFT_RST, reg);
2023 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2025 if (!(GLOB_SFT_RST & t)) {
2026 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2031 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2034 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2037 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2039 if (GLOB_SFT_RST & t) {
2040 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2047 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2050 void __iomem *port_mmio;
2053 tmp = readl(mmio + MV_RESET_CFG);
2054 if ((tmp & (1 << 0)) == 0) {
2055 hpriv->signal[idx].amps = 0x7 << 8;
2056 hpriv->signal[idx].pre = 0x1 << 5;
2060 port_mmio = mv_port_base(mmio, idx);
2061 tmp = readl(port_mmio + PHY_MODE2);
2063 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2064 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2067 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2069 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2072 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2075 void __iomem *port_mmio = mv_port_base(mmio, port);
2077 u32 hp_flags = hpriv->hp_flags;
2079 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2081 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2084 if (fix_phy_mode2) {
2085 m2 = readl(port_mmio + PHY_MODE2);
2088 writel(m2, port_mmio + PHY_MODE2);
2092 m2 = readl(port_mmio + PHY_MODE2);
2093 m2 &= ~((1 << 16) | (1 << 31));
2094 writel(m2, port_mmio + PHY_MODE2);
2099 /* who knows what this magic does */
2100 tmp = readl(port_mmio + PHY_MODE3);
2103 writel(tmp, port_mmio + PHY_MODE3);
2105 if (fix_phy_mode4) {
2108 m4 = readl(port_mmio + PHY_MODE4);
2110 if (hp_flags & MV_HP_ERRATA_60X1B2)
2111 tmp = readl(port_mmio + 0x310);
2113 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2115 writel(m4, port_mmio + PHY_MODE4);
2117 if (hp_flags & MV_HP_ERRATA_60X1B2)
2118 writel(tmp, port_mmio + 0x310);
2121 /* Revert values of pre-emphasis and signal amps to the saved ones */
2122 m2 = readl(port_mmio + PHY_MODE2);
2124 m2 &= ~MV_M2_PREAMP_MASK;
2125 m2 |= hpriv->signal[port].amps;
2126 m2 |= hpriv->signal[port].pre;
2129 /* according to mvSata 3.6.1, some IIE values are fixed */
2130 if (IS_GEN_IIE(hpriv)) {
2135 writel(m2, port_mmio + PHY_MODE2);
2138 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2139 unsigned int port_no)
2141 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2143 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2145 if (IS_GEN_II(hpriv)) {
2146 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2147 ifctl |= (1 << 7); /* enable gen2i speed */
2148 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2149 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2152 udelay(25); /* allow reset propagation */
2154 /* Spec never mentions clearing the bit. Marvell's driver does
2155 * clear the bit, however.
2157 writelfl(0, port_mmio + EDMA_CMD_OFS);
2159 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2161 if (IS_GEN_I(hpriv))
2166 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2167 * @ap: ATA channel to manipulate
2169 * Part of this is taken from __sata_phy_reset and modified to
2170 * not sleep since this routine gets called from interrupt level.
2173 * Inherited from caller. This is coded to be safe to call at
2174 * interrupt level, i.e. it does not sleep.
2176 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2177 unsigned long deadline)
2179 struct mv_port_priv *pp = ap->private_data;
2180 struct mv_host_priv *hpriv = ap->host->private_data;
2181 void __iomem *port_mmio = mv_ap_base(ap);
2185 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2189 u32 sstatus, serror, scontrol;
2191 mv_scr_read(ap, SCR_STATUS, &sstatus);
2192 mv_scr_read(ap, SCR_ERROR, &serror);
2193 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2194 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2195 "SCtrl 0x%08x\n", status, serror, scontrol);
2199 /* Issue COMRESET via SControl */
2201 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
2204 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
2208 sata_scr_read(ap, SCR_STATUS, &sstatus);
2209 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2213 } while (time_before(jiffies, deadline));
2215 /* work around errata */
2216 if (IS_GEN_II(hpriv) &&
2217 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2219 goto comreset_retry;
2223 u32 sstatus, serror, scontrol;
2225 mv_scr_read(ap, SCR_STATUS, &sstatus);
2226 mv_scr_read(ap, SCR_ERROR, &serror);
2227 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2228 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2229 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2233 if (ata_port_offline(ap)) {
2234 *class = ATA_DEV_NONE;
2238 /* even after SStatus reflects that device is ready,
2239 * it seems to take a while for link to be fully
2240 * established (and thus Status no longer 0x80/0x7F),
2241 * so we poll a bit for that, here.
2245 u8 drv_stat = ata_check_status(ap);
2246 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2251 if (time_after(jiffies, deadline))
2255 /* FIXME: if we passed the deadline, the following
2256 * code probably produces an invalid result
2259 /* finally, read device signature from TF registers */
2260 *class = ata_dev_try_classify(ap, 0, NULL);
2262 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2264 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2269 static int mv_prereset(struct ata_port *ap, unsigned long deadline)
2271 struct mv_port_priv *pp = ap->private_data;
2272 struct ata_eh_context *ehc = &ap->eh_context;
2275 rc = mv_stop_dma(ap);
2277 ehc->i.action |= ATA_EH_HARDRESET;
2279 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2280 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2281 ehc->i.action |= ATA_EH_HARDRESET;
2284 /* if we're about to do hardreset, nothing more to do */
2285 if (ehc->i.action & ATA_EH_HARDRESET)
2288 if (ata_port_online(ap))
2289 rc = ata_wait_ready(ap, deadline);
2296 static int mv_hardreset(struct ata_port *ap, unsigned int *class,
2297 unsigned long deadline)
2299 struct mv_host_priv *hpriv = ap->host->private_data;
2300 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2304 mv_channel_reset(hpriv, mmio, ap->port_no);
2306 mv_phy_reset(ap, class, deadline);
2311 static void mv_postreset(struct ata_port *ap, unsigned int *classes)
2315 /* print link status */
2316 sata_print_link_status(ap);
2319 sata_scr_read(ap, SCR_ERROR, &serr);
2320 sata_scr_write_flush(ap, SCR_ERROR, serr);
2322 /* bail out if no device is present */
2323 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2324 DPRINTK("EXIT, no device\n");
2328 /* set up device control */
2329 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2332 static void mv_error_handler(struct ata_port *ap)
2334 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2335 mv_hardreset, mv_postreset);
2338 static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2340 mv_stop_dma(qc->ap);
2343 static void mv_eh_freeze(struct ata_port *ap)
2345 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2346 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;
2350 /* FIXME: handle coalescing completion events properly */
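	/* The main cause/mask register carries two bits per port (err, done).
	 * HC0's ports plus its coalescing-done bit occupy bits 0-8, so the
	 * bits for HC1's ports (4-7) start one position higher.
	 */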
2352 shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

2356 mask = 0x3 << shift;
2358 /* disable assertion of portN err, done events */
2359 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2360 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2363 static void mv_eh_thaw(struct ata_port *ap)
2365 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2366 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2367 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2368 void __iomem *port_mmio = mv_ap_base(ap);
2369 u32 tmp, mask, hc_irq_cause;
2370 unsigned int shift, hc_port_no = ap->port_no;
2372 /* FIXME: handle coalescing completion events properly */
2374 shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

2380 mask = 0x3 << shift;
2382 /* clear EDMA errors on this port */
2383 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2385 /* clear pending irq events */
2386 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2387 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2388 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2389 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2391 /* enable assertion of portN err, done events */
2392 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2393 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2397 * mv_port_init - Perform some early initialization on a single port.
2398 * @port: libata data structure storing shadow register addresses
2399 * @port_mmio: base address of the port
2401 * Initialize shadow register mmio addresses, clear outstanding
2402 * interrupts on the port, and unmask interrupts for the future
2403 * start of the port.
2406 * Inherited from caller.
2408 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2410 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;
2413 /* PIO related setup */
2415 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
2417 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2418 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2419 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2420 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2421 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2422 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
2424 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2425 /* special case: control/altstatus doesn't have ATA_REG_ address */
2426 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
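	/* unused by this driver: there is no SFF BMDMA engine, and SCR
	 * access goes through the port MMIO window (mv_scr_read/write)
	 * rather than an scr_addr block
	 */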
2429 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2431 /* Clear any currently outstanding port interrupt conditions */
2432 serr_ofs = mv_scr_offset(SCR_ERROR);
2433 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2434 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2436 /* unmask all EDMA error interrupts */
2437 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2439 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2440 readl(port_mmio + EDMA_CFG_OFS),
2441 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2442 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2445 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2447 struct pci_dev *pdev = to_pci_dev(host->dev);
2448 struct mv_host_priv *hpriv = host->private_data;
2449 u32 hp_flags = hpriv->hp_flags;
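	/* The board index selects the chip generation (ops vector and
	 * MV_HP_GEN_* flag); the PCI revision ID then picks which errata
	 * workaround flags apply to that stepping.
	 */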
2453 hpriv->ops = &mv5xxx_ops;
2454 hp_flags |= MV_HP_GEN_I;
2456 switch (pdev->revision) {
2458 hp_flags |= MV_HP_ERRATA_50XXB0;
2461 hp_flags |= MV_HP_ERRATA_50XXB2;
2464 dev_printk(KERN_WARNING, &pdev->dev,
2465 "Applying 50XXB2 workarounds to unknown rev\n");
2466 hp_flags |= MV_HP_ERRATA_50XXB2;
2473 hpriv->ops = &mv5xxx_ops;
2474 hp_flags |= MV_HP_GEN_I;
2476 switch (pdev->revision) {
2478 hp_flags |= MV_HP_ERRATA_50XXB0;
2481 hp_flags |= MV_HP_ERRATA_50XXB2;
2484 dev_printk(KERN_WARNING, &pdev->dev,
2485 "Applying B2 workarounds to unknown rev\n");
2486 hp_flags |= MV_HP_ERRATA_50XXB2;
2493 hpriv->ops = &mv6xxx_ops;
2494 hp_flags |= MV_HP_GEN_II;
2496 switch (pdev->revision) {
2498 hp_flags |= MV_HP_ERRATA_60X1B2;
2501 hp_flags |= MV_HP_ERRATA_60X1C0;
2504 dev_printk(KERN_WARNING, &pdev->dev,
2505 "Applying B2 workarounds to unknown rev\n");
2506 hp_flags |= MV_HP_ERRATA_60X1B2;
2513 hpriv->ops = &mv6xxx_ops;
2514 hp_flags |= MV_HP_GEN_IIE;
2516 switch (pdev->revision) {
2518 hp_flags |= MV_HP_ERRATA_XX42A0;
2521 hp_flags |= MV_HP_ERRATA_60X1C0;
2524 dev_printk(KERN_WARNING, &pdev->dev,
2525 "Applying 60X1C0 workarounds to unknown rev\n");
2526 hp_flags |= MV_HP_ERRATA_60X1C0;
2532 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2536 hpriv->hp_flags = hp_flags;
2542 * mv_init_host - Perform some early initialization of the host.
2543 * @host: ATA host to initialize
2544 * @board_idx: controller index
2546 * If possible, do an early global reset of the host. Then do
2547 * our port init and clear/unmask all/relevant host interrupts.
2550 * Inherited from caller.
2552 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2554 int rc = 0, n_hc, port, hc;
2555 struct pci_dev *pdev = to_pci_dev(host->dev);
2556 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2557 struct mv_host_priv *hpriv = host->private_data;
2559 /* global interrupt mask */
2560 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2562 rc = mv_chip_id(host, board_idx);
2566 n_hc = mv_get_hc_count(host->ports[0]->flags);
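	/* each SATA host-controller (HC) block serves up to four ports */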
2568 for (port = 0; port < host->n_ports; port++)
2569 hpriv->ops->read_preamp(hpriv, port, mmio);
2571 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2575 hpriv->ops->reset_flash(hpriv, mmio);
2576 hpriv->ops->reset_bus(pdev, mmio);
2577 hpriv->ops->enable_leds(hpriv, mmio);
2579 for (port = 0; port < host->n_ports; port++) {
2580 if (IS_GEN_II(hpriv)) {
2581 void __iomem *port_mmio = mv_port_base(mmio, port);
2583 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2584 ifctl |= (1 << 7); /* enable gen2i speed */
2585 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2586 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2589 hpriv->ops->phy_errata(hpriv, mmio, port);
2592 for (port = 0; port < host->n_ports; port++) {
2593 void __iomem *port_mmio = mv_port_base(mmio, port);
2594 mv_port_init(&host->ports[port]->ioaddr, port_mmio);
2597 for (hc = 0; hc < n_hc; hc++) {
2598 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2600 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2601 "(before clear)=0x%08x\n", hc,
2602 readl(hc_mmio + HC_CFG_OFS),
2603 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2605 /* Clear any currently outstanding hc interrupt conditions */
2606 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2609 /* Clear any currently outstanding host interrupt conditions */
2610 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2612 /* and unmask interrupt generation for host regs */
2613 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2615 if (IS_GEN_I(hpriv))
2616 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
2618 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2620 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2621 "PCI int cause/mask=0x%08x/0x%08x\n",
2622 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2623 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2624 readl(mmio + PCI_IRQ_CAUSE_OFS),
2625 readl(mmio + PCI_IRQ_MASK_OFS));
2632 * mv_print_info - Dump key info to kernel log for perusal.
2633 * @host: ATA host to print info about
2635 * FIXME: complete this.
2638 * Inherited from caller.
2640 static void mv_print_info(struct ata_host *host)
2642 struct pci_dev *pdev = to_pci_dev(host->dev);
2643 struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
2645 const char *scc_s, *gen;
2647 /* Use this to determine the HW stepping of the chip so we know
2648 * what errata to work around
	 */
2650 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
2653 else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

2658 if (IS_GEN_I(hpriv))
		gen = "I";
2660 else if (IS_GEN_II(hpriv))
		gen = "II";
2662 else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";
2667 dev_printk(KERN_INFO, &pdev->dev,
2668 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2669 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2670 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2674 * mv_init_one - handle a positive probe of a Marvell host
2675 * @pdev: PCI device found
2676 * @ent: PCI device ID entry for the matched host
2679 * Inherited from caller.
2681 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2683 static int printed_version = 0;
2684 unsigned int board_idx = (unsigned int)ent->driver_data;
2685 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2686 struct ata_host *host;
2687 struct mv_host_priv *hpriv;
	int n_ports, rc;
2690 if (!printed_version++)
2691 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2694 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2696 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2697 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
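	/* both allocations are device-managed and are released automatically
	 * if probing fails or the device is later detached
	 */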
2698 if (!host || !hpriv)
		return -ENOMEM;
2700 host->private_data = hpriv;
2702 /* acquire resources */
2703 rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

2707 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
2709 pcim_pin_device(pdev);
	if (rc)
		return rc;
2712 host->iomap = pcim_iomap_table(pdev);
2714 rc = pci_go_64(pdev);
2718 /* initialize adapter */
2719 rc = mv_init_host(host, board_idx);
2723 /* Enable interrupts */
2724 if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);	/* MSI enable failed, fall back to INTx */
2727 mv_dump_pci_cfg(pdev, 0x68);
2728 mv_print_info(host);
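	/* enable bus mastering for DMA; Memory-Write-Invalidate is a
	 * best-effort optimisation, so a pci_try_set_mwi() failure is
	 * deliberately ignored
	 */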
2730 pci_set_master(pdev);
2731 pci_try_set_mwi(pdev);
2732 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2733 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2736 static int __init mv_init(void)
2738 return pci_register_driver(&mv_pci_driver);
2741 static void __exit mv_exit(void)
2743 pci_unregister_driver(&mv_pci_driver);
2746 MODULE_AUTHOR("Brett Russ");
2747 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2748 MODULE_LICENSE("GPL");
2749 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2750 MODULE_VERSION(DRV_VERSION);
2752 module_param(msi, int, 0444);
2753 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2755 module_init(mv_init);
2756 module_exit(mv_exit);