2 * sata_mv.c - Marvell SATA support
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X) are still needed.
32 2) Convert to LibATA new EH. Required for hotplug, NCQ, and sane
33 probing/error handling in general. MUST HAVE.
35 3) Add hotplug support (easy, once new-EH support appears)
37 4) Add NCQ support (easy to intermediate, once new-EH support appears)
39 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
41 6) Add port multiplier support (intermediate)
43 7) Test and verify 3.0 Gbps support
45 8) Develop a low-power-consumption strategy, and implement it.
47 9) [Experiment, low priority] See if ATAPI can be supported using
48 "unknown FIS" or "vendor-specific FIS" support, or something creative
51 10) [Experiment, low priority] Investigate interrupt coalescing.
52 Quite often, especially with PCI Message Signalled Interrupts (MSI),
53 the overhead reduced by interrupt mitigation is not
54 worth the latency cost.
56 11) [Experiment, Marvell value added] Is it possible to use target
57 mode to cross-connect two Linux boxes with Marvell cards? If so,
58 creating LibATA target mode support would be very interesting.
60 Target mode, for those without docs, is the ability to directly
61 connect two SATA controllers.
63 13) Verify that 7042 is fully supported. I only have a 6042.
68 #include <linux/kernel.h>
69 #include <linux/module.h>
70 #include <linux/pci.h>
71 #include <linux/init.h>
72 #include <linux/blkdev.h>
73 #include <linux/delay.h>
74 #include <linux/interrupt.h>
75 #include <linux/dma-mapping.h>
76 #include <linux/device.h>
77 #include <scsi/scsi_host.h>
78 #include <scsi/scsi_cmnd.h>
79 #include <linux/libata.h>
81 #define DRV_NAME "sata_mv"
82 #define DRV_VERSION "0.81"
85 /* BARs are enumerated in pci_resource_start() terms */
86 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
87 MV_IO_BAR = 2, /* offset 0x18: IO space */
88 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
90 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
91 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
94 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
95 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
96 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
97 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
98 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
99 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
101 MV_SATAHC0_REG_BASE = 0x20000,
102 MV_FLASH_CTL = 0x1046c,
103 MV_GPIO_PORT_CTL = 0x104f0,
104 MV_RESET_CFG = 0x180d8,
106 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
107 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
109 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
111 MV_USE_Q_DEPTH = ATA_DEF_QUEUE,
114 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
116 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
117 * CRPB needs alignment on a 256B boundary. Size == 256B
118 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
119 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
121 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
122 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
124 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
125 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
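/* Illustrative tally of that per-port DMA chunk (a sketch based on the
 * sizes noted above, not an extra requirement): 32 CRQBs * 32B +
 * 32 CRPBs * 8B + 176 ePRDs * 16B = 1024 + 256 + 2816 = 4096 bytes,
 * i.e. the 4KB mentioned in the comment block above.
 */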
128 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
129 MV_PORT_HC_SHIFT = 2,
130 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
134 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
135 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
136 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
137 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
138 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
139 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
141 CRQB_FLAG_READ = (1 << 0),
143 CRQB_CMD_ADDR_SHIFT = 8,
144 CRQB_CMD_CS = (0x2 << 11),
145 CRQB_CMD_LAST = (1 << 15),
147 CRPB_FLAG_STATUS_SHIFT = 8,
149 EPRD_FLAG_END_OF_TBL = (1 << 31),
151 /* PCI interface registers */
153 PCI_COMMAND_OFS = 0xc00,
155 PCI_MAIN_CMD_STS_OFS = 0xd30,
156 STOP_PCI_MASTER = (1 << 2),
157 PCI_MASTER_EMPTY = (1 << 3),
158 GLOB_SFT_RST = (1 << 4),
161 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
162 MV_PCI_DISC_TIMER = 0xd04,
163 MV_PCI_MSI_TRIGGER = 0xc38,
164 MV_PCI_SERR_MASK = 0xc28,
165 MV_PCI_XBAR_TMOUT = 0x1d04,
166 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
167 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
168 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
169 MV_PCI_ERR_COMMAND = 0x1d50,
171 PCI_IRQ_CAUSE_OFS = 0x1d58,
172 PCI_IRQ_MASK_OFS = 0x1d5c,
173 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
175 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
176 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
177 PORT0_ERR = (1 << 0), /* shift by port # */
178 PORT0_DONE = (1 << 1), /* shift by port # */
179 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
180 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
182 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
183 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
184 PORTS_0_3_COAL_DONE = (1 << 8),
185 PORTS_4_7_COAL_DONE = (1 << 17),
186 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
187 GPIO_INT = (1 << 22),
188 SELF_INT = (1 << 23),
189 TWSI_INT = (1 << 24),
190 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
191 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
192 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
193 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
195 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
198 /* SATAHC registers */
201 HC_IRQ_CAUSE_OFS = 0x14,
202 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
203 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
204 DEV_IRQ = (1 << 8), /* shift by port # */
206 /* Shadow block registers */
208 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
211 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
212 SATA_ACTIVE_OFS = 0x350,
219 SATA_INTERFACE_CTL = 0x050,
221 MV_M2_PREAMP_MASK = 0x7e0,
225 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
226 EDMA_CFG_NCQ = (1 << 5),
227 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
228 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
229 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
231 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
232 EDMA_ERR_IRQ_MASK_OFS = 0xc,
233 EDMA_ERR_D_PAR = (1 << 0),
234 EDMA_ERR_PRD_PAR = (1 << 1),
235 EDMA_ERR_DEV = (1 << 2),
236 EDMA_ERR_DEV_DCON = (1 << 3),
237 EDMA_ERR_DEV_CON = (1 << 4),
238 EDMA_ERR_SERR = (1 << 5),
239 EDMA_ERR_SELF_DIS = (1 << 7),
240 EDMA_ERR_BIST_ASYNC = (1 << 8),
241 EDMA_ERR_CRBQ_PAR = (1 << 9),
242 EDMA_ERR_CRPB_PAR = (1 << 10),
243 EDMA_ERR_INTRL_PAR = (1 << 11),
244 EDMA_ERR_IORDY = (1 << 12),
245 EDMA_ERR_LNK_CTRL_RX = (0xf << 13),
246 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
247 EDMA_ERR_LNK_DATA_RX = (0xf << 17),
248 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),
249 EDMA_ERR_LNK_DATA_TX = (0x1f << 26),
250 EDMA_ERR_TRANS_PROTO = (1 << 31),
251 EDMA_ERR_FATAL = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
252 EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
253 EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
254 EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
255 EDMA_ERR_LNK_DATA_RX |
256 EDMA_ERR_LNK_DATA_TX |
257 EDMA_ERR_TRANS_PROTO),
259 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
260 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
262 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
263 EDMA_REQ_Q_PTR_SHIFT = 5,
265 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
266 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
267 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
268 EDMA_RSP_Q_PTR_SHIFT = 3,
275 EDMA_IORDY_TMOUT = 0x34,
278 /* Host private flags (hp_flags) */
279 MV_HP_FLAG_MSI = (1 << 0),
280 MV_HP_ERRATA_50XXB0 = (1 << 1),
281 MV_HP_ERRATA_50XXB2 = (1 << 2),
282 MV_HP_ERRATA_60X1B2 = (1 << 3),
283 MV_HP_ERRATA_60X1C0 = (1 << 4),
284 MV_HP_ERRATA_XX42A0 = (1 << 5),
285 MV_HP_50XX = (1 << 6),
286 MV_HP_GEN_IIE = (1 << 7),
288 /* Port private flags (pp_flags) */
289 MV_PP_FLAG_EDMA_EN = (1 << 0),
290 MV_PP_FLAG_EDMA_DS_ACT = (1 << 1),
293 #define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
294 #define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
295 #define IS_GEN_I(hpriv) IS_50XX(hpriv)
296 #define IS_GEN_II(hpriv) IS_60XX(hpriv)
297 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
300 MV_DMA_BOUNDARY = 0xffffffffU,
302 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
304 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
317 /* Command ReQuest Block: 32B */
333 /* Command ResPonse Block: 8B */
340 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
348 struct mv_port_priv {
349 struct mv_crqb *crqb;
351 struct mv_crpb *crpb;
353 struct mv_sg *sg_tbl;
354 dma_addr_t sg_tbl_dma;
358 struct mv_port_signal {
365 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
367 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
368 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
370 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
372 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
373 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
376 struct mv_host_priv {
378 struct mv_port_signal signal[8];
379 const struct mv_hw_ops *ops;
382 static void mv_irq_clear(struct ata_port *ap);
383 static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
384 static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
385 static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
386 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
387 static void mv_phy_reset(struct ata_port *ap);
388 static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
389 static int mv_port_start(struct ata_port *ap);
390 static void mv_port_stop(struct ata_port *ap);
391 static void mv_qc_prep(struct ata_queued_cmd *qc);
392 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
393 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
394 static void mv_eng_timeout(struct ata_port *ap);
395 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
397 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
399 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
400 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
402 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
404 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
405 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
407 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
409 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
410 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
412 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
414 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
415 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
416 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
417 unsigned int port_no);
418 static void mv_stop_and_reset(struct ata_port *ap);
420 static struct scsi_host_template mv_sht = {
421 .module = THIS_MODULE,
423 .ioctl = ata_scsi_ioctl,
424 .queuecommand = ata_scsi_queuecmd,
425 .can_queue = MV_USE_Q_DEPTH,
426 .this_id = ATA_SHT_THIS_ID,
427 .sg_tablesize = MV_MAX_SG_CT,
428 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
429 .emulated = ATA_SHT_EMULATED,
431 .proc_name = DRV_NAME,
432 .dma_boundary = MV_DMA_BOUNDARY,
433 .slave_configure = ata_scsi_slave_config,
434 .slave_destroy = ata_scsi_slave_destroy,
435 .bios_param = ata_std_bios_param,
438 static const struct ata_port_operations mv5_ops = {
439 .port_disable = ata_port_disable,
441 .tf_load = ata_tf_load,
442 .tf_read = ata_tf_read,
443 .check_status = ata_check_status,
444 .exec_command = ata_exec_command,
445 .dev_select = ata_std_dev_select,
447 .phy_reset = mv_phy_reset,
448 .cable_detect = ata_cable_sata,
450 .qc_prep = mv_qc_prep,
451 .qc_issue = mv_qc_issue,
452 .data_xfer = ata_data_xfer,
454 .eng_timeout = mv_eng_timeout,
456 .irq_clear = mv_irq_clear,
457 .irq_on = ata_irq_on,
458 .irq_ack = ata_irq_ack,
460 .scr_read = mv5_scr_read,
461 .scr_write = mv5_scr_write,
463 .port_start = mv_port_start,
464 .port_stop = mv_port_stop,
467 static const struct ata_port_operations mv6_ops = {
468 .port_disable = ata_port_disable,
470 .tf_load = ata_tf_load,
471 .tf_read = ata_tf_read,
472 .check_status = ata_check_status,
473 .exec_command = ata_exec_command,
474 .dev_select = ata_std_dev_select,
476 .phy_reset = mv_phy_reset,
477 .cable_detect = ata_cable_sata,
479 .qc_prep = mv_qc_prep,
480 .qc_issue = mv_qc_issue,
481 .data_xfer = ata_data_xfer,
483 .eng_timeout = mv_eng_timeout,
485 .irq_clear = mv_irq_clear,
486 .irq_on = ata_irq_on,
487 .irq_ack = ata_irq_ack,
489 .scr_read = mv_scr_read,
490 .scr_write = mv_scr_write,
492 .port_start = mv_port_start,
493 .port_stop = mv_port_stop,
496 static const struct ata_port_operations mv_iie_ops = {
497 .port_disable = ata_port_disable,
499 .tf_load = ata_tf_load,
500 .tf_read = ata_tf_read,
501 .check_status = ata_check_status,
502 .exec_command = ata_exec_command,
503 .dev_select = ata_std_dev_select,
505 .phy_reset = mv_phy_reset,
506 .cable_detect = ata_cable_sata,
508 .qc_prep = mv_qc_prep_iie,
509 .qc_issue = mv_qc_issue,
510 .data_xfer = ata_data_xfer,
512 .eng_timeout = mv_eng_timeout,
514 .irq_clear = mv_irq_clear,
515 .irq_on = ata_irq_on,
516 .irq_ack = ata_irq_ack,
518 .scr_read = mv_scr_read,
519 .scr_write = mv_scr_write,
521 .port_start = mv_port_start,
522 .port_stop = mv_port_stop,
525 static const struct ata_port_info mv_port_info[] = {
527 .flags = MV_COMMON_FLAGS,
528 .pio_mask = 0x1f, /* pio0-4 */
529 .udma_mask = 0x7f, /* udma0-6 */
530 .port_ops = &mv5_ops,
533 .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
534 .pio_mask = 0x1f, /* pio0-4 */
535 .udma_mask = 0x7f, /* udma0-6 */
536 .port_ops = &mv5_ops,
539 .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
540 .pio_mask = 0x1f, /* pio0-4 */
541 .udma_mask = 0x7f, /* udma0-6 */
542 .port_ops = &mv5_ops,
545 .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
546 .pio_mask = 0x1f, /* pio0-4 */
547 .udma_mask = 0x7f, /* udma0-6 */
548 .port_ops = &mv6_ops,
551 .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
553 .pio_mask = 0x1f, /* pio0-4 */
554 .udma_mask = 0x7f, /* udma0-6 */
555 .port_ops = &mv6_ops,
558 .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
559 .pio_mask = 0x1f, /* pio0-4 */
560 .udma_mask = 0x7f, /* udma0-6 */
561 .port_ops = &mv_iie_ops,
564 .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
565 .pio_mask = 0x1f, /* pio0-4 */
566 .udma_mask = 0x7f, /* udma0-6 */
567 .port_ops = &mv_iie_ops,
571 static const struct pci_device_id mv_pci_tbl[] = {
572 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
573 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
574 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
575 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
577 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
578 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
579 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
580 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
581 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
583 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
585 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
587 /* add Marvell 7042 support */
588 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
590 { } /* terminate list */
593 static struct pci_driver mv_pci_driver = {
595 .id_table = mv_pci_tbl,
596 .probe = mv_init_one,
597 .remove = ata_pci_remove_one,
600 static const struct mv_hw_ops mv5xxx_ops = {
601 .phy_errata = mv5_phy_errata,
602 .enable_leds = mv5_enable_leds,
603 .read_preamp = mv5_read_preamp,
604 .reset_hc = mv5_reset_hc,
605 .reset_flash = mv5_reset_flash,
606 .reset_bus = mv5_reset_bus,
609 static const struct mv_hw_ops mv6xxx_ops = {
610 .phy_errata = mv6_phy_errata,
611 .enable_leds = mv6_enable_leds,
612 .read_preamp = mv6_read_preamp,
613 .reset_hc = mv6_reset_hc,
614 .reset_flash = mv6_reset_flash,
615 .reset_bus = mv_reset_pci_bus,
621 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
624 /* move to PCI layer or libata core? */
625 static int pci_go_64(struct pci_dev *pdev)
629 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
630 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
632 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
634 dev_printk(KERN_ERR, &pdev->dev,
635 "64-bit DMA enable failed\n");
640 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
642 dev_printk(KERN_ERR, &pdev->dev,
643 "32-bit DMA enable failed\n");
646 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
648 dev_printk(KERN_ERR, &pdev->dev,
649 "32-bit consistent DMA enable failed\n");
661 static inline void writelfl(unsigned long data, void __iomem *addr)
664 (void) readl(addr); /* flush to avoid PCI posted write */
667 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
669 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
672 static inline unsigned int mv_hc_from_port(unsigned int port)
674 return port >> MV_PORT_HC_SHIFT;
677 static inline unsigned int mv_hardport_from_port(unsigned int port)
679 return port & MV_PORT_MASK;
682 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
685 return mv_hc_base(base, mv_hc_from_port(port));
688 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
690 return mv_hc_base_from_port(base, port) +
691 MV_SATAHC_ARBTR_REG_SZ +
692 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
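/* Worked example of the address math above (illustration only): port 5 is
 * HC 1, hard port 1, so mv_port_base() yields
 * base + 0x20000 + 1*0x10000 + 0x2000 + 1*0x2000 = base + 0x34000.
 */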
695 static inline void __iomem *mv_ap_base(struct ata_port *ap)
697 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
700 static inline int mv_get_hc_count(unsigned long port_flags)
702 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
705 static void mv_irq_clear(struct ata_port *ap)
710 * mv_start_dma - Enable eDMA engine
711 * @base: port base address
712 * @pp: port private data
714 * Verify the local cache of the eDMA state is accurate with a WARN_ON.
718 * Inherited from caller.
720 static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
722 if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
723 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
724 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
726 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
730 * mv_stop_dma - Disable eDMA engine
731 * @ap: ATA channel to manipulate
733 * Verify the local cache of the eDMA state is accurate with a WARN_ON.
737 * Inherited from caller.
739 static void mv_stop_dma(struct ata_port *ap)
741 void __iomem *port_mmio = mv_ap_base(ap);
742 struct mv_port_priv *pp = ap->private_data;
746 if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
747 /* Disable EDMA if active. The disable bit auto clears.
749 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
750 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
752 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
755 /* now properly wait for the eDMA to stop */
756 for (i = 1000; i > 0; i--) {
757 reg = readl(port_mmio + EDMA_CMD_OFS);
758 if (!(EDMA_EN & reg)) {
765 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
766 /* FIXME: Consider doing a reset here to recover */
771 static void mv_dump_mem(void __iomem *start, unsigned bytes)
774 for (b = 0; b < bytes; ) {
775 DPRINTK("%p: ", start + b);
776 for (w = 0; b < bytes && w < 4; w++) {
777 printk("%08x ", readl(start + b));
785 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
790 for (b = 0; b < bytes; ) {
791 DPRINTK("%02x: ", b);
792 for (w = 0; b < bytes && w < 4; w++) {
793 (void) pci_read_config_dword(pdev, b, &dw);
801 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
802 struct pci_dev *pdev)
805 void __iomem *hc_base = mv_hc_base(mmio_base,
806 port >> MV_PORT_HC_SHIFT);
807 void __iomem *port_base;
808 int start_port, num_ports, p, start_hc, num_hcs, hc;
811 start_hc = start_port = 0;
812 num_ports = 8; /* should be benign for 4-port devices */
815 start_hc = port >> MV_PORT_HC_SHIFT;
817 num_ports = num_hcs = 1;
819 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
820 num_ports > 1 ? num_ports - 1 : start_port);
823 DPRINTK("PCI config space regs:\n");
824 mv_dump_pci_cfg(pdev, 0x68);
826 DPRINTK("PCI regs:\n");
827 mv_dump_mem(mmio_base+0xc00, 0x3c);
828 mv_dump_mem(mmio_base+0xd00, 0x34);
829 mv_dump_mem(mmio_base+0xf00, 0x4);
830 mv_dump_mem(mmio_base+0x1d00, 0x6c);
831 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
832 hc_base = mv_hc_base(mmio_base, hc);
833 DPRINTK("HC regs (HC %i):\n", hc);
834 mv_dump_mem(hc_base, 0x1c);
836 for (p = start_port; p < start_port + num_ports; p++) {
837 port_base = mv_port_base(mmio_base, p);
838 DPRINTK("EDMA regs (port %i):\n", p);
839 mv_dump_mem(port_base, 0x54);
840 DPRINTK("SATA regs (port %i):\n", p);
841 mv_dump_mem(port_base+0x300, 0x60);
846 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
854 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
857 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
866 static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
868 unsigned int ofs = mv_scr_offset(sc_reg_in);
870 if (0xffffffffU != ofs)
871 return readl(mv_ap_base(ap) + ofs);
876 static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
878 unsigned int ofs = mv_scr_offset(sc_reg_in);
880 if (0xffffffffU != ofs)
881 writelfl(val, mv_ap_base(ap) + ofs);
884 static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
886 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
888 /* set up non-NCQ EDMA configuration */
889 cfg &= ~(1 << 9); /* disable eQue (queued DMA) */
891 if (IS_GEN_I(hpriv)) {
892 cfg &= ~0x1f; /* clear queue depth */
893 cfg |= (1 << 8); /* enab config burst size mask */
896 else if (IS_GEN_II(hpriv)) {
897 cfg &= ~0x1f; /* clear queue depth */
898 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
899 cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
902 else if (IS_GEN_IIE(hpriv)) {
903 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
904 cfg |= (1 << 22); /* enab 4-entry host queue cache */
905 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
906 cfg |= (1 << 18); /* enab early completion */
907 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
908 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
909 cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
912 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
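/* Illustrative outcome for a Gen II part (a sketch of the code above, not
 * taken from the datasheet): queue-depth bits 4:0 and bit 9 cleared, read
 * burst 512B (bit 11) and write buffer 512B (bit 13) set, NCQ (bit 5) and
 * continue-on-error (bit 14) cleared.
 */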
916 * mv_port_start - Port specific init/start routine.
917 * @ap: ATA channel to manipulate
919 * Allocate and point to DMA memory, init port private memory,
923 * Inherited from caller.
925 static int mv_port_start(struct ata_port *ap)
927 struct device *dev = ap->host->dev;
928 struct mv_host_priv *hpriv = ap->host->private_data;
929 struct mv_port_priv *pp;
930 void __iomem *port_mmio = mv_ap_base(ap);
935 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
939 mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
943 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
945 rc = ata_pad_alloc(ap, dev);
949 /* First item in chunk of DMA memory:
950 * 32-slot command request table (CRQB), 32 bytes each in size
953 pp->crqb_dma = mem_dma;
955 mem_dma += MV_CRQB_Q_SZ;
958 * 32-slot command response table (CRPB), 8 bytes each in size
961 pp->crpb_dma = mem_dma;
963 mem_dma += MV_CRPB_Q_SZ;
966 * Table of scatter-gather descriptors (ePRD), 16 bytes each
969 pp->sg_tbl_dma = mem_dma;
971 mv_edma_cfg(hpriv, port_mmio);
973 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
974 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
975 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
977 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
978 writelfl(pp->crqb_dma & 0xffffffff,
979 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
981 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
983 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
985 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
986 writelfl(pp->crpb_dma & 0xffffffff,
987 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
989 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
991 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
992 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
994 /* Don't turn on EDMA here...do it before DMA commands only. Else
995 * we'll be unable to send non-data, PIO, etc. due to restricted access to the shadow registers.
998 ap->private_data = pp;
1003 * mv_port_stop - Port specific cleanup/stop routine.
1004 * @ap: ATA channel to manipulate
1006 * Stop DMA, cleanup port memory.
1009 * This routine uses the host lock to protect the DMA stop.
1011 static void mv_port_stop(struct ata_port *ap)
1013 unsigned long flags;
1015 spin_lock_irqsave(&ap->host->lock, flags);
1017 spin_unlock_irqrestore(&ap->host->lock, flags);
1021 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1022 * @qc: queued command whose SG list to source from
1024 * Populate the SG list and mark the last entry.
1027 * Inherited from caller.
1029 static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
1031 struct mv_port_priv *pp = qc->ap->private_data;
1032 unsigned int n_sg = 0;
1033 struct scatterlist *sg;
1034 struct mv_sg *mv_sg;
1037 ata_for_each_sg(sg, qc) {
1038 dma_addr_t addr = sg_dma_address(sg);
1039 u32 sg_len = sg_dma_len(sg);
1041 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1042 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1043 mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
1045 if (ata_sg_is_last(sg, qc))
1046 mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1055 static inline unsigned mv_inc_q_index(unsigned index)
1057 return (index + 1) & MV_MAX_Q_DEPTH_MASK;
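/* e.g. with the 32-slot queues used here (mask 0x1f), mv_inc_q_index(31) == 0:
 * a simple power-of-two wraparound (illustration only).
 */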
1060 static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1062 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1063 (last ? CRQB_CMD_LAST : 0);
1064 *cmdw = cpu_to_le16(tmp);
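/* Illustration of the packing above, assuming ATA_REG_CMD == 7 and
 * ATA_CMD_READ == 0xc8 as defined in <linux/ata.h>: packing the READ DMA
 * command byte as the last word gives
 * 0xc8 | (7 << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | CRQB_CMD_LAST
 * = 0x00c8 | 0x0700 | 0x1000 | 0x8000 = 0x97c8.
 */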
1068 * mv_qc_prep - Host specific command preparation.
1069 * @qc: queued command to prepare
1071 * This routine simply redirects to the general purpose routine
1072 * if command is not DMA. Else, it handles prep of the CRQB
1073 * (command request block), does some sanity checking, and calls
1074 * the SG load routine.
1077 * Inherited from caller.
1079 static void mv_qc_prep(struct ata_queued_cmd *qc)
1081 struct ata_port *ap = qc->ap;
1082 struct mv_port_priv *pp = ap->private_data;
1084 struct ata_taskfile *tf;
1088 if (ATA_PROT_DMA != qc->tf.protocol)
1091 /* Fill in command request block
1093 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1094 flags |= CRQB_FLAG_READ;
1095 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1096 flags |= qc->tag << CRQB_TAG_SHIFT;
1098 /* get current queue index from hardware */
1099 in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1100 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1102 pp->crqb[in_index].sg_addr =
1103 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1104 pp->crqb[in_index].sg_addr_hi =
1105 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1106 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1108 cw = &pp->crqb[in_index].ata_cmd[0];
1111 /* Sadly, the CRQB cannot accommodate all registers--there are
1112 * only 11 bytes...so we must pick and choose required
1113 * registers based on the command. So, we drop feature and
1114 * hob_feature for [RW] DMA commands, but they are needed for
1115 * NCQ. NCQ will drop hob_nsect.
1117 switch (tf->command) {
1119 case ATA_CMD_READ_EXT:
1121 case ATA_CMD_WRITE_EXT:
1122 case ATA_CMD_WRITE_FUA_EXT:
1123 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1125 #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1126 case ATA_CMD_FPDMA_READ:
1127 case ATA_CMD_FPDMA_WRITE:
1128 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1129 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1131 #endif /* FIXME: remove this line when NCQ added */
1133 /* The only other commands EDMA supports in non-queued and
1134 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1135 * of which are defined/used by Linux. If we get here, this
1136 * driver needs work.
1138 * FIXME: modify libata to give qc_prep a return value and
1139 * return error here.
1141 BUG_ON(tf->command);
1144 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1145 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1146 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1147 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1148 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1149 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1150 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1151 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1152 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1154 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1160 * mv_qc_prep_iie - Host specific command preparation.
1161 * @qc: queued command to prepare
1163 * This routine simply redirects to the general purpose routine
1164 * if command is not DMA. Else, it handles prep of the CRQB
1165 * (command request block), does some sanity checking, and calls
1166 * the SG load routine.
1169 * Inherited from caller.
1171 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1173 struct ata_port *ap = qc->ap;
1174 struct mv_port_priv *pp = ap->private_data;
1175 struct mv_crqb_iie *crqb;
1176 struct ata_taskfile *tf;
1180 if (ATA_PROT_DMA != qc->tf.protocol)
1183 /* Fill in Gen IIE command request block
1185 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1186 flags |= CRQB_FLAG_READ;
1188 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1189 flags |= qc->tag << CRQB_TAG_SHIFT;
1191 /* get current queue index from hardware */
1192 in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1193 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1195 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1196 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1197 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1198 crqb->flags = cpu_to_le32(flags);
1201 crqb->ata_cmd[0] = cpu_to_le32(
1202 (tf->command << 16) |
1205 crqb->ata_cmd[1] = cpu_to_le32(
1211 crqb->ata_cmd[2] = cpu_to_le32(
1212 (tf->hob_lbal << 0) |
1213 (tf->hob_lbam << 8) |
1214 (tf->hob_lbah << 16) |
1215 (tf->hob_feature << 24)
1217 crqb->ata_cmd[3] = cpu_to_le32(
1219 (tf->hob_nsect << 8)
1222 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1228 * mv_qc_issue - Initiate a command to the host
1229 * @qc: queued command to start
1231 * This routine simply redirects to the general purpose routine
1232 * if command is not DMA. Else, it sanity checks our local
1233 * caches of the request producer/consumer indices then enables
1234 * DMA and bumps the request producer index.
1237 * Inherited from caller.
1239 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1241 void __iomem *port_mmio = mv_ap_base(qc->ap);
1242 struct mv_port_priv *pp = qc->ap->private_data;
1246 if (ATA_PROT_DMA != qc->tf.protocol) {
1247 /* We're about to send a non-EDMA capable command to the
1248 * port. Turn off EDMA so there won't be problems accessing
1249 * the shadow block and other registers.
1251 mv_stop_dma(qc->ap);
1252 return ata_qc_issue_prot(qc);
1255 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1256 in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1258 /* until we do queuing, the queue should be empty at this point */
1259 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1260 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1262 in_index = mv_inc_q_index(in_index); /* now incr producer index */
1264 mv_start_dma(port_mmio, pp);
1266 /* and write the request in pointer to kick the EDMA to life */
1267 in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
1268 in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
1269 writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
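/* Sketch of the layout this relies on: with 32 CRQB slots the producer
 * index sits in bits 9:5 (EDMA_REQ_Q_PTR_SHIFT) while bits 31:10 still
 * carry the low request-queue base address, hence the masking with
 * EDMA_REQ_Q_BASE_LO_MASK before or-ing in the new index.
 */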
1275 * mv_get_crpb_status - get status from most recently completed cmd
1276 * @ap: ATA channel to manipulate
1278 * This routine is for use when the port is in DMA mode, when it
1279 * will be using the CRPB (command response block) method of
1280 * returning command completion information. We check indices
1281 * are good, grab status, and bump the response consumer index to
1282 * prove that we're up to date.
1285 * Inherited from caller.
1287 static u8 mv_get_crpb_status(struct ata_port *ap)
1289 void __iomem *port_mmio = mv_ap_base(ap);
1290 struct mv_port_priv *pp = ap->private_data;
1295 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1296 out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1298 ata_status = le16_to_cpu(pp->crpb[out_index].flags)
1299 >> CRPB_FLAG_STATUS_SHIFT;
1301 /* increment our consumer index... */
1302 out_index = mv_inc_q_index(out_index);
1304 /* and, until we do NCQ, there should only be 1 CRPB waiting */
1305 WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1306 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1308 /* write out our inc'd consumer index so EDMA knows we're caught up */
1309 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
1310 out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
1311 writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1313 /* Return ATA status register for completed CRPB */
1318 * mv_err_intr - Handle error interrupts on the port
1319 * @ap: ATA channel to manipulate
1320 * @reset_allowed: bool: 0 == don't trigger a reset from here
1322 * In most cases, just clear the interrupt and move on. However,
1323 * some cases require an eDMA reset, which is done right before
1324 * the COMRESET in mv_phy_reset(). The SERR case requires a
1325 * clear of pending errors in the SATA SERROR register. Finally,
1326 * if the port disabled DMA, update our cached copy to match.
1329 * Inherited from caller.
1331 static void mv_err_intr(struct ata_port *ap, int reset_allowed)
1333 void __iomem *port_mmio = mv_ap_base(ap);
1334 u32 edma_err_cause, serr = 0;
1336 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1338 if (EDMA_ERR_SERR & edma_err_cause) {
1339 sata_scr_read(ap, SCR_ERROR, &serr);
1340 sata_scr_write_flush(ap, SCR_ERROR, serr);
1342 if (EDMA_ERR_SELF_DIS & edma_err_cause) {
1343 struct mv_port_priv *pp = ap->private_data;
1344 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1346 DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
1347 "SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);
1349 /* Clear EDMA now that SERR cleanup done */
1350 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1352 /* check for fatal here and recover if needed */
1353 if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
1354 mv_stop_and_reset(ap);
1358 * mv_host_intr - Handle all interrupts on the given host controller
1359 * @host: host specific structure
1360 * @relevant: port error bits relevant to this host controller
1361 * @hc: which host controller we're to look at
1363 * Read then write clear the HC interrupt status then walk each
1364 * port connected to the HC and see if it needs servicing. Port
1365 * success ints are reported in the HC interrupt status reg, the
1366 * port error ints are reported in the higher level main
1367 * interrupt status register and thus are passed in via the
1368 * 'relevant' argument.
1371 * Inherited from caller.
1373 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1375 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1376 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1377 struct ata_queued_cmd *qc;
1379 int shift, port, port0, hard_port, handled;
1380 unsigned int err_mask;
1385 port0 = MV_PORTS_PER_HC;
1387 /* we'll need the HC success int register in most cases */
1388 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1390 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1392 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1393 hc, relevant, hc_irq_cause);
1395 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1397 struct ata_port *ap = host->ports[port];
1398 struct mv_port_priv *pp = ap->private_data;
1400 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1401 handled = 0; /* ensure ata_status is set if handled++ */
1403 /* Note that DEV_IRQ might happen spuriously during EDMA,
1404 * and should be ignored in such cases.
1405 * The cause of this is still under investigation.
1407 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1408 /* EDMA: check for response queue interrupt */
1409 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
1410 ata_status = mv_get_crpb_status(ap);
1414 /* PIO: check for device (drive) interrupt */
1415 if ((DEV_IRQ << hard_port) & hc_irq_cause) {
1416 ata_status = readb(ap->ioaddr.status_addr);
1418 /* ignore spurious intr if drive still BUSY */
1419 if (ata_status & ATA_BUSY) {
1426 if (ap && (ap->flags & ATA_FLAG_DISABLED))
1429 err_mask = ac_err_mask(ata_status);
1431 shift = port << 1; /* (port * 2) */
1432 if (port >= MV_PORTS_PER_HC) {
1433 shift++; /* skip bit 8 in the HC Main IRQ reg */
1435 if ((PORT0_ERR << shift) & relevant) {
1437 err_mask |= AC_ERR_OTHER;
1442 qc = ata_qc_from_tag(ap, ap->active_tag);
1443 if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
1444 VPRINTK("port %u IRQ found for qc, "
1445 "ata_status 0x%x\n", port,ata_status);
1446 /* mark qc status appropriately */
1447 if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
1448 qc->err_mask |= err_mask;
1449 ata_qc_complete(qc);
1460 * @dev_instance: private data; in this case the host structure
1463 * Read the read only register to determine if any host
1464 * controllers have pending interrupts. If so, call lower level
1465 * routine to handle. Also check for PCI errors which are only
1469 * This routine holds the host lock while processing pending
1472 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1474 struct ata_host *host = dev_instance;
1475 unsigned int hc, handled = 0, n_hcs;
1476 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1477 struct mv_host_priv *hpriv;
1480 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1482 /* check the cases where we either have nothing pending or have read
1483 * a bogus register value which can indicate HW removal or PCI fault
1485 if (!irq_stat || (0xffffffffU == irq_stat))
1488 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1489 spin_lock(&host->lock);
1491 for (hc = 0; hc < n_hcs; hc++) {
1492 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1494 mv_host_intr(host, relevant, hc);
1499 hpriv = host->private_data;
1500 if (IS_60XX(hpriv)) {
1501 /* deal with the interrupt coalescing bits */
1502 if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
1503 writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
1504 writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
1505 writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
1509 if (PCI_ERR & irq_stat) {
1510 printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
1511 readl(mmio + PCI_IRQ_CAUSE_OFS));
1513 DPRINTK("All regs @ PCI error\n");
1514 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1516 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1519 spin_unlock(&host->lock);
1521 return IRQ_RETVAL(handled);
1524 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1526 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1527 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1529 return hc_mmio + ofs;
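/* Illustration: for port 6 (HC 1, hard port 2) the offset is
 * (2 + 1) * 0x100 = 0x300 into that HC's register block.
 */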
1532 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1536 switch (sc_reg_in) {
1540 ofs = sc_reg_in * sizeof(u32);
1549 static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1551 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1552 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1553 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1555 if (ofs != 0xffffffffU)
1556 return readl(addr + ofs);
1561 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1563 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1564 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1565 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1567 if (ofs != 0xffffffffU)
1568 writelfl(val, addr + ofs);
1571 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1576 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
1578 early_5080 = (pdev->device == 0x5080) && (rev_id == 0);
1581 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1583 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1586 mv_reset_pci_bus(pdev, mmio);
1589 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1591 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1594 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1597 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1600 tmp = readl(phy_mmio + MV5_PHY_MODE);
1602 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1603 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1606 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1610 writel(0, mmio + MV_GPIO_PORT_CTL);
1612 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1614 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1616 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1619 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1622 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1623 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1625 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1628 tmp = readl(phy_mmio + MV5_LT_MODE);
1630 writel(tmp, phy_mmio + MV5_LT_MODE);
1632 tmp = readl(phy_mmio + MV5_PHY_CTL);
1635 writel(tmp, phy_mmio + MV5_PHY_CTL);
1638 tmp = readl(phy_mmio + MV5_PHY_MODE);
1640 tmp |= hpriv->signal[port].pre;
1641 tmp |= hpriv->signal[port].amps;
1642 writel(tmp, phy_mmio + MV5_PHY_MODE);
1647 #define ZERO(reg) writel(0, port_mmio + (reg))
1648 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1651 void __iomem *port_mmio = mv_port_base(mmio, port);
1653 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1655 mv_channel_reset(hpriv, mmio, port);
1657 ZERO(0x028); /* command */
1658 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1659 ZERO(0x004); /* timer */
1660 ZERO(0x008); /* irq err cause */
1661 ZERO(0x00c); /* irq err mask */
1662 ZERO(0x010); /* rq bah */
1663 ZERO(0x014); /* rq inp */
1664 ZERO(0x018); /* rq outp */
1665 ZERO(0x01c); /* respq bah */
1666 ZERO(0x024); /* respq outp */
1667 ZERO(0x020); /* respq inp */
1668 ZERO(0x02c); /* test control */
1669 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1673 #define ZERO(reg) writel(0, hc_mmio + (reg))
1674 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1677 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1685 tmp = readl(hc_mmio + 0x20);
1688 writel(tmp, hc_mmio + 0x20);
1692 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1695 unsigned int hc, port;
1697 for (hc = 0; hc < n_hc; hc++) {
1698 for (port = 0; port < MV_PORTS_PER_HC; port++)
1699 mv5_reset_hc_port(hpriv, mmio,
1700 (hc * MV_PORTS_PER_HC) + port);
1702 mv5_reset_one_hc(hpriv, mmio, hc);
1709 #define ZERO(reg) writel(0, mmio + (reg))
1710 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1714 tmp = readl(mmio + MV_PCI_MODE);
1716 writel(tmp, mmio + MV_PCI_MODE);
1718 ZERO(MV_PCI_DISC_TIMER);
1719 ZERO(MV_PCI_MSI_TRIGGER);
1720 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1721 ZERO(HC_MAIN_IRQ_MASK_OFS);
1722 ZERO(MV_PCI_SERR_MASK);
1723 ZERO(PCI_IRQ_CAUSE_OFS);
1724 ZERO(PCI_IRQ_MASK_OFS);
1725 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1726 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1727 ZERO(MV_PCI_ERR_ATTRIBUTE);
1728 ZERO(MV_PCI_ERR_COMMAND);
1732 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1736 mv5_reset_flash(hpriv, mmio);
1738 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1740 tmp |= (1 << 5) | (1 << 6);
1741 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1745 * mv6_reset_hc - Perform the 6xxx global soft reset
1746 * @mmio: base address of the HBA
1748 * This routine only applies to 6xxx parts.
1751 * Inherited from caller.
1753 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1756 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1760 /* Following procedure defined in PCI "main command and status
1764 writel(t | STOP_PCI_MASTER, reg);
1766 for (i = 0; i < 1000; i++) {
1769 if (PCI_MASTER_EMPTY & t) {
1773 if (!(PCI_MASTER_EMPTY & t)) {
1774 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
1782 writel(t | GLOB_SFT_RST, reg);
1785 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
1787 if (!(GLOB_SFT_RST & t)) {
1788 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
1793 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
1796 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
1799 } while ((GLOB_SFT_RST & t) && (i-- > 0));
1801 if (GLOB_SFT_RST & t) {
1802 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
1809 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
1812 void __iomem *port_mmio;
1815 tmp = readl(mmio + MV_RESET_CFG);
1816 if ((tmp & (1 << 0)) == 0) {
1817 hpriv->signal[idx].amps = 0x7 << 8;
1818 hpriv->signal[idx].pre = 0x1 << 5;
1822 port_mmio = mv_port_base(mmio, idx);
1823 tmp = readl(port_mmio + PHY_MODE2);
1825 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
1826 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
1829 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1831 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
1834 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1837 void __iomem *port_mmio = mv_port_base(mmio, port);
1839 u32 hp_flags = hpriv->hp_flags;
1841 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1843 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1846 if (fix_phy_mode2) {
1847 m2 = readl(port_mmio + PHY_MODE2);
1850 writel(m2, port_mmio + PHY_MODE2);
1854 m2 = readl(port_mmio + PHY_MODE2);
1855 m2 &= ~((1 << 16) | (1 << 31));
1856 writel(m2, port_mmio + PHY_MODE2);
1861 /* who knows what this magic does */
1862 tmp = readl(port_mmio + PHY_MODE3);
1865 writel(tmp, port_mmio + PHY_MODE3);
1867 if (fix_phy_mode4) {
1870 m4 = readl(port_mmio + PHY_MODE4);
1872 if (hp_flags & MV_HP_ERRATA_60X1B2)
1873 tmp = readl(port_mmio + 0x310);
1875 m4 = (m4 & ~(1 << 1)) | (1 << 0);
1877 writel(m4, port_mmio + PHY_MODE4);
1879 if (hp_flags & MV_HP_ERRATA_60X1B2)
1880 writel(tmp, port_mmio + 0x310);
1883 /* Revert values of pre-emphasis and signal amps to the saved ones */
1884 m2 = readl(port_mmio + PHY_MODE2);
1886 m2 &= ~MV_M2_PREAMP_MASK;
1887 m2 |= hpriv->signal[port].amps;
1888 m2 |= hpriv->signal[port].pre;
1891 /* according to mvSata 3.6.1, some IIE values are fixed */
1892 if (IS_GEN_IIE(hpriv)) {
1897 writel(m2, port_mmio + PHY_MODE2);
1900 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
1901 unsigned int port_no)
1903 void __iomem *port_mmio = mv_port_base(mmio, port_no);
1905 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
1907 if (IS_60XX(hpriv)) {
1908 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
1909 ifctl |= (1 << 7); /* enable gen2i speed */
1910 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
1911 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
1914 udelay(25); /* allow reset propagation */
1916 /* Spec never mentions clearing the bit. Marvell's driver does
1917 * clear the bit, however.
1919 writelfl(0, port_mmio + EDMA_CMD_OFS);
1921 hpriv->ops->phy_errata(hpriv, mmio, port_no);
1927 static void mv_stop_and_reset(struct ata_port *ap)
1929 struct mv_host_priv *hpriv = ap->host->private_data;
1930 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1934 mv_channel_reset(hpriv, mmio, ap->port_no);
1936 __mv_phy_reset(ap, 0);
1939 static inline void __msleep(unsigned int msec, int can_sleep)
1948 * __mv_phy_reset - Perform eDMA reset followed by COMRESET
1949 * @ap: ATA channel to manipulate
1951 * Part of this is taken from __sata_phy_reset and modified to
1952 * not sleep since this routine gets called from interrupt level.
1955 * Inherited from caller. This is coded to be safe to call at
1956 * interrupt level, i.e. it does not sleep.
1958 static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
1960 struct mv_port_priv *pp = ap->private_data;
1961 struct mv_host_priv *hpriv = ap->host->private_data;
1962 void __iomem *port_mmio = mv_ap_base(ap);
1963 struct ata_taskfile tf;
1964 struct ata_device *dev = &ap->device[0];
1965 unsigned long timeout;
1969 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
1971 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
1972 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1973 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1975 /* Issue COMRESET via SControl */
1977 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1978 __msleep(1, can_sleep);
1980 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1981 __msleep(20, can_sleep);
1983 timeout = jiffies + msecs_to_jiffies(200);
1985 sata_scr_read(ap, SCR_STATUS, &sstatus);
1986 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
1989 __msleep(1, can_sleep);
1990 } while (time_before(jiffies, timeout));
1992 /* work around errata */
1993 if (IS_60XX(hpriv) &&
1994 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
1996 goto comreset_retry;
1998 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
1999 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
2000 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
2002 if (ata_port_online(ap)) {
2005 sata_scr_read(ap, SCR_STATUS, &sstatus);
2006 ata_port_printk(ap, KERN_INFO,
2007 "no device found (phy stat %08x)\n", sstatus);
2008 ata_port_disable(ap);
2012 /* even after SStatus reflects that device is ready,
2013 * it seems to take a while for link to be fully
2014 * established (and thus Status no longer 0x80/0x7F),
2015 * so we poll a bit for that, here.
2019 u8 drv_stat = ata_check_status(ap);
2020 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2022 __msleep(500, can_sleep);
2027 tf.lbah = readb(ap->ioaddr.lbah_addr);
2028 tf.lbam = readb(ap->ioaddr.lbam_addr);
2029 tf.lbal = readb(ap->ioaddr.lbal_addr);
2030 tf.nsect = readb(ap->ioaddr.nsect_addr);
2032 dev->class = ata_dev_classify(&tf);
2033 if (!ata_dev_enabled(dev)) {
2034 VPRINTK("Port disabled post-sig: No device present.\n");
2035 ata_port_disable(ap);
2038 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2040 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2045 static void mv_phy_reset(struct ata_port *ap)
2047 __mv_phy_reset(ap, 1);
2051 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
2052 * @ap: ATA channel to manipulate
2054 * Intent is to clear all pending error conditions, reset the
2055 * chip/bus, fail the command, and move on.
2058 * This routine holds the host lock while failing the command.
2060 static void mv_eng_timeout(struct ata_port *ap)
2062 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2063 struct ata_queued_cmd *qc;
2064 unsigned long flags;
2066 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
2067 DPRINTK("All regs @ start of eng_timeout\n");
2068 mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));
2070 qc = ata_qc_from_tag(ap, ap->active_tag);
2071 printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
2072 mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
2074 spin_lock_irqsave(&ap->host->lock, flags);
2076 mv_stop_and_reset(ap);
2077 spin_unlock_irqrestore(&ap->host->lock, flags);
2079 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
2080 if (qc->flags & ATA_QCFLAG_ACTIVE) {
2081 qc->err_mask |= AC_ERR_TIMEOUT;
2082 ata_eh_qc_complete(qc);
2087 * mv_port_init - Perform some early initialization on a single port.
2088 * @port: libata data structure storing shadow register addresses
2089 * @port_mmio: base address of the port
2091 * Initialize shadow register mmio addresses, clear outstanding
2092 * interrupts on the port, and unmask interrupts for the future
2093 * start of the port.
2096 * Inherited from caller.
2098 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2100 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2103 /* PIO related setup
2105 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2107 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2108 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2109 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2110 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2111 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2112 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2114 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2115 /* special case: control/altstatus doesn't have ATA_REG_ address */
2116 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2119 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
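/* Illustration of the layout above, assuming ATA_REG_NSECT == 2 as in
 * <linux/ata.h>: nsect_addr ends up at shd_base + 4 * 2 = shd_base + 0x08,
 * i.e. the shadow registers are spaced one u32 apart.
 */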
2121 /* Clear any currently outstanding port interrupt conditions */
2122 serr_ofs = mv_scr_offset(SCR_ERROR);
2123 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2124 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2126 /* unmask all EDMA error interrupts */
2127 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2129 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2130 readl(port_mmio + EDMA_CFG_OFS),
2131 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2132 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2135 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2137 struct pci_dev *pdev = to_pci_dev(host->dev);
2138 struct mv_host_priv *hpriv = host->private_data;
2140 u32 hp_flags = hpriv->hp_flags;
2142 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2146 hpriv->ops = &mv5xxx_ops;
2147 hp_flags |= MV_HP_50XX;
2151 hp_flags |= MV_HP_ERRATA_50XXB0;
2154 hp_flags |= MV_HP_ERRATA_50XXB2;
2157 dev_printk(KERN_WARNING, &pdev->dev,
2158 "Applying 50XXB2 workarounds to unknown rev\n");
2159 hp_flags |= MV_HP_ERRATA_50XXB2;
2166 hpriv->ops = &mv5xxx_ops;
2167 hp_flags |= MV_HP_50XX;
2171 hp_flags |= MV_HP_ERRATA_50XXB0;
2174 hp_flags |= MV_HP_ERRATA_50XXB2;
2177 dev_printk(KERN_WARNING, &pdev->dev,
2178 "Applying B2 workarounds to unknown rev\n");
2179 hp_flags |= MV_HP_ERRATA_50XXB2;
2186 hpriv->ops = &mv6xxx_ops;
2190 hp_flags |= MV_HP_ERRATA_60X1B2;
2193 hp_flags |= MV_HP_ERRATA_60X1C0;
2196 dev_printk(KERN_WARNING, &pdev->dev,
2197 "Applying B2 workarounds to unknown rev\n");
2198 hp_flags |= MV_HP_ERRATA_60X1B2;
2205 hpriv->ops = &mv6xxx_ops;
2207 hp_flags |= MV_HP_GEN_IIE;
2211 hp_flags |= MV_HP_ERRATA_XX42A0;
2214 hp_flags |= MV_HP_ERRATA_60X1C0;
2217 dev_printk(KERN_WARNING, &pdev->dev,
2218 "Applying 60X1C0 workarounds to unknown rev\n");
2219 hp_flags |= MV_HP_ERRATA_60X1C0;
2225 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2229 hpriv->hp_flags = hp_flags;
2235 * mv_init_host - Perform some early initialization of the host.
2236 * @host: ATA host to initialize
2237 * @board_idx: controller index
2239 * If possible, do an early global reset of the host. Then do
2240 * our port init and clear/unmask all/relevant host interrupts.
2243 * Inherited from caller.
2245 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2247 int rc = 0, n_hc, port, hc;
2248 struct pci_dev *pdev = to_pci_dev(host->dev);
2249 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2250 struct mv_host_priv *hpriv = host->private_data;
2252 /* global interrupt mask */
2253 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2255 rc = mv_chip_id(host, board_idx);
2259 n_hc = mv_get_hc_count(host->ports[0]->flags);
2261 for (port = 0; port < host->n_ports; port++)
2262 hpriv->ops->read_preamp(hpriv, port, mmio);
2264 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2268 hpriv->ops->reset_flash(hpriv, mmio);
2269 hpriv->ops->reset_bus(pdev, mmio);
2270 hpriv->ops->enable_leds(hpriv, mmio);
2272 for (port = 0; port < host->n_ports; port++) {
2273 if (IS_60XX(hpriv)) {
2274 void __iomem *port_mmio = mv_port_base(mmio, port);
2276 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2277 ifctl |= (1 << 7); /* enable gen2i speed */
2278 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2279 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2282 hpriv->ops->phy_errata(hpriv, mmio, port);
2285 for (port = 0; port < host->n_ports; port++) {
2286 void __iomem *port_mmio = mv_port_base(mmio, port);
2287 mv_port_init(&host->ports[port]->ioaddr, port_mmio);
2290 for (hc = 0; hc < n_hc; hc++) {
2291 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2293 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2294 "(before clear)=0x%08x\n", hc,
2295 readl(hc_mmio + HC_CFG_OFS),
2296 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2298 /* Clear any currently outstanding hc interrupt conditions */
2299 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2302 /* Clear any currently outstanding host interrupt conditions */
2303 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2305 /* and unmask interrupt generation for host regs */
2306 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2309 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2311 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2313 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2314 "PCI int cause/mask=0x%08x/0x%08x\n",
2315 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2316 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2317 readl(mmio + PCI_IRQ_CAUSE_OFS),
2318 readl(mmio + PCI_IRQ_MASK_OFS));
2325 * mv_print_info - Dump key info to kernel log for perusal.
2326 * @host: ATA host to print info about
2328 * FIXME: complete this.
2331 * Inherited from caller.
2333 static void mv_print_info(struct ata_host *host)
2335 struct pci_dev *pdev = to_pci_dev(host->dev);
2336 struct mv_host_priv *hpriv = host->private_data;
2340 /* Use this to determine the HW stepping of the chip so we know
2341 * what errata to work around
2343 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2345 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2348 else if (scc == 0x01)
2353 dev_printk(KERN_INFO, &pdev->dev,
2354 "%u slots %u ports %s mode IRQ via %s\n",
2355 (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2356 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2360 * mv_init_one - handle a positive probe of a Marvell host
2361 * @pdev: PCI device found
2362 * @ent: PCI device ID entry for the matched host
2365 * Inherited from caller.
2367 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2369 static int printed_version = 0;
2370 unsigned int board_idx = (unsigned int)ent->driver_data;
2371 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2372 struct ata_host *host;
2373 struct mv_host_priv *hpriv;
2376 if (!printed_version++)
2377 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2380 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2382 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2383 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2384 if (!host || !hpriv)
2386 host->private_data = hpriv;
2388 /* acquire resources */
2389 rc = pcim_enable_device(pdev);
2393 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2395 pcim_pin_device(pdev);
2398 host->iomap = pcim_iomap_table(pdev);
2400 rc = pci_go_64(pdev);
2404 /* initialize adapter */
2405 rc = mv_init_host(host, board_idx);
2409 /* Enable interrupts */
2410 if (msi && pci_enable_msi(pdev))
2413 mv_dump_pci_cfg(pdev, 0x68);
2414 mv_print_info(host);
2416 pci_set_master(pdev);
2417 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2421 static int __init mv_init(void)
2423 return pci_register_driver(&mv_pci_driver);
2426 static void __exit mv_exit(void)
2428 pci_unregister_driver(&mv_pci_driver);
2431 MODULE_AUTHOR("Brett Russ");
2432 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2433 MODULE_LICENSE("GPL");
2434 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2435 MODULE_VERSION(DRV_VERSION);
2437 module_param(msi, int, 0444);
2438 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
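/* Usage example: MSI is off by default; loading with "modprobe sata_mv msi=1"
 * enables it (see TODO item 5 above regarding MSI).
 */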
2440 module_init(mv_init);
2441 module_exit(mv_exit);