2 * sata_mv.c - Marvell SATA support
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
32 2) Improve/fix IRQ and error handling sequences.
34 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36 4) Think about TCQ support here, and for libata in general
37 with controllers that support it via host-queuing hardware
38 (a software-only implementation could be a nightmare).
40 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42 6) Add port multiplier support (intermediate)
44 8) Develop a low-power-consumption strategy, and implement it.
46 9) [Experiment, low priority] See if ATAPI can be supported using
47 "unknown FIS" or "vendor-specific FIS" support, or something creative
50 10) [Experiment, low priority] Investigate interrupt coalescing.
51 Quite often, especially with PCI Message Signalled Interrupts (MSI),
52 the overhead reduced by interrupt mitigation is not
53 worth the latency cost.
55 11) [Experiment, Marvell value added] Is it possible to use target
56 mode to cross-connect two Linux boxes with Marvell cards? If so,
57 creating LibATA target mode support would be very interesting.
59 Target mode, for those without docs, is the ability to directly
60 connect two SATA controllers.
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/pci.h>
68 #include <linux/init.h>
69 #include <linux/blkdev.h>
70 #include <linux/delay.h>
71 #include <linux/interrupt.h>
72 #include <linux/dmapool.h>
73 #include <linux/dma-mapping.h>
74 #include <linux/device.h>
75 #include <linux/platform_device.h>
76 #include <linux/ata_platform.h>
77 #include <scsi/scsi_host.h>
78 #include <scsi/scsi_cmnd.h>
79 #include <scsi/scsi_device.h>
80 #include <linux/libata.h>
82 #define DRV_NAME "sata_mv"
83 #define DRV_VERSION "1.20"
86 /* BARs are enumerated in pci_resource_start() terms */
87 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
88 MV_IO_BAR = 2, /* offset 0x18: IO space */
89 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
91 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
95 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
96 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
97 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
98 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
99 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
102 MV_SATAHC0_REG_BASE = 0x20000,
103 MV_FLASH_CTL = 0x1046c,
104 MV_GPIO_PORT_CTL = 0x104f0,
105 MV_RESET_CFG = 0x180d8,
107 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
110 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
113 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
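/* Illustrative arithmetic for the sizes above (assuming MV_MAX_Q_DEPTH == 32
 * and MV_MAX_SG_CT == 256, as defined elsewhere in this enum):
 *   MV_CRQB_Q_SZ = 32 * 32  = 1024 bytes, matching the 1KB CRQB alignment,
 *   MV_CRPB_Q_SZ =  8 * 32  =  256 bytes, matching the 256B CRPB alignment,
 *   MV_SG_TBL_SZ = 16 * 256 = 4096 bytes (one ePRD table per command tag).
 */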
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT = 2,
127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
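/* Worked example (assuming MV_PORT_MASK == 3): port 5 lives on HC 1
 * (5 >> MV_PORT_HC_SHIFT) as hard port 1 (5 & MV_PORT_MASK); see
 * mv_hc_from_port() and mv_hardport_from_port() below.
 */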
131 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
133 /* SoC integrated controllers, no PCI interface */
134 MV_FLAG_SOC = (1 << 28),
136 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
137 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138 ATA_FLAG_PIO_POLLING,
139 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
141 CRQB_FLAG_READ = (1 << 0),
143 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
144 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
145 CRQB_CMD_ADDR_SHIFT = 8,
146 CRQB_CMD_CS = (0x2 << 11),
147 CRQB_CMD_LAST = (1 << 15),
149 CRPB_FLAG_STATUS_SHIFT = 8,
150 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
151 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
153 EPRD_FLAG_END_OF_TBL = (1 << 31),
155 /* PCI interface registers */
157 PCI_COMMAND_OFS = 0xc00,
159 PCI_MAIN_CMD_STS_OFS = 0xd30,
160 STOP_PCI_MASTER = (1 << 2),
161 PCI_MASTER_EMPTY = (1 << 3),
162 GLOB_SFT_RST = (1 << 4),
165 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
166 MV_PCI_DISC_TIMER = 0xd04,
167 MV_PCI_MSI_TRIGGER = 0xc38,
168 MV_PCI_SERR_MASK = 0xc28,
169 MV_PCI_XBAR_TMOUT = 0x1d04,
170 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
171 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
172 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
173 MV_PCI_ERR_COMMAND = 0x1d50,
175 PCI_IRQ_CAUSE_OFS = 0x1d58,
176 PCI_IRQ_MASK_OFS = 0x1d5c,
177 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
179 PCIE_IRQ_CAUSE_OFS = 0x1900,
180 PCIE_IRQ_MASK_OFS = 0x1910,
181 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
183 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
184 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
185 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
186 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
187 PORT0_ERR = (1 << 0), /* shift by port # */
188 PORT0_DONE = (1 << 1), /* shift by port # */
189 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
190 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
192 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
193 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
194 PORTS_0_3_COAL_DONE = (1 << 8),
195 PORTS_4_7_COAL_DONE = (1 << 17),
196 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
197 GPIO_INT = (1 << 22),
198 SELF_INT = (1 << 23),
199 TWSI_INT = (1 << 24),
200 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
201 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
202 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
203 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
204 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
206 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
208 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
210 /* SATAHC registers */
213 HC_IRQ_CAUSE_OFS = 0x14,
214 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
215 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
216 DEV_IRQ = (1 << 8), /* shift by port # */
218 /* Shadow block registers */
220 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
223 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
224 SATA_ACTIVE_OFS = 0x350,
225 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
232 SATA_INTERFACE_CTL = 0x050,
234 MV_M2_PREAMP_MASK = 0x7e0,
238 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
239 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
240 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
241 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
242 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
244 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
245 EDMA_ERR_IRQ_MASK_OFS = 0xc,
246 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
247 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
248 EDMA_ERR_DEV = (1 << 2), /* device error */
249 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
250 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
251 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
252 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
253 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
254 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
255 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
256 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
257 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
258 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
259 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
261 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
262 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
263 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
264 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
265 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
267 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
269 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
270 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
271 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
272 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
273 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
274 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
276 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
278 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
279 EDMA_ERR_OVERRUN_5 = (1 << 5),
280 EDMA_ERR_UNDERRUN_5 = (1 << 6),
282 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
283 EDMA_ERR_LNK_CTRL_RX_1 |
284 EDMA_ERR_LNK_CTRL_RX_3 |
285 EDMA_ERR_LNK_CTRL_TX,
287 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
297 EDMA_ERR_LNK_CTRL_RX_2 |
298 EDMA_ERR_LNK_DATA_RX |
299 EDMA_ERR_LNK_DATA_TX |
300 EDMA_ERR_TRANS_PROTO,
301 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
306 EDMA_ERR_UNDERRUN_5 |
307 EDMA_ERR_SELF_DIS_5 |
313 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
314 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
316 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
317 EDMA_REQ_Q_PTR_SHIFT = 5,
319 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
320 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
321 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
322 EDMA_RSP_Q_PTR_SHIFT = 3,
324 EDMA_CMD_OFS = 0x28, /* EDMA command register */
325 EDMA_EN = (1 << 0), /* enable EDMA */
326 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
327 ATA_RST = (1 << 2), /* reset trans/link/phy */
329 EDMA_IORDY_TMOUT = 0x34,
332 /* Host private flags (hp_flags) */
333 MV_HP_FLAG_MSI = (1 << 0),
334 MV_HP_ERRATA_50XXB0 = (1 << 1),
335 MV_HP_ERRATA_50XXB2 = (1 << 2),
336 MV_HP_ERRATA_60X1B2 = (1 << 3),
337 MV_HP_ERRATA_60X1C0 = (1 << 4),
338 MV_HP_ERRATA_XX42A0 = (1 << 5),
339 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
340 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
341 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
342 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
344 /* Port private flags (pp_flags) */
345 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
346 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
347 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
350 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
351 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
352 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
353 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
356 /* DMA boundary 0xffff is required by the s/g splitting
357 * we need on /length/ in mv_fill_sg().
359 MV_DMA_BOUNDARY = 0xffffU,
361 /* mask of register bits containing lower 32 bits
362 * of EDMA request queue DMA address
364 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
366 /* ditto, for response queue */
367 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
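/* Note how these masks mirror the alignment rules above: a 1KB-aligned CRQB
 * ring leaves bits 9:0 clear and a 256B-aligned CRPB ring leaves bits 7:0
 * clear, so the base address can share a register with the ring index that
 * mv_set_edma_ptrs() writes into those low bits.
 */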
381 /* Command ReQuest Block: 32B */
397 /* Command ResPonse Block: 8B */
404 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
412 struct mv_port_priv {
413 struct mv_crqb *crqb;
415 struct mv_crpb *crpb;
417 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
418 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
420 unsigned int req_idx;
421 unsigned int resp_idx;
426 struct mv_port_signal {
431 struct mv_host_priv {
433 struct mv_port_signal signal[8];
434 const struct mv_hw_ops *ops;
437 void __iomem *main_cause_reg_addr;
438 void __iomem *main_mask_reg_addr;
443 * These consistent DMA memory pools give us guaranteed
444 * alignment for hardware-accessed data structures,
445 * and less memory waste in accomplishing the alignment.
447 struct dma_pool *crqb_pool;
448 struct dma_pool *crpb_pool;
449 struct dma_pool *sg_tbl_pool;
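/* Minimal creation sketch (illustrative only; the pool name here is an
 * assumption): size == alignment, so every allocation automatically meets
 * the hardware alignment rules noted above, e.g.
 *   hpriv->crqb_pool = dmam_pool_create("crqb_q", dev,
 *                                       MV_CRQB_Q_SZ, MV_CRQB_Q_SZ, 0);
 */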
453 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
455 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
456 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
458 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
460 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
461 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
464 static void mv_irq_clear(struct ata_port *ap);
465 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
466 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
467 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
468 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
469 static int mv_port_start(struct ata_port *ap);
470 static void mv_port_stop(struct ata_port *ap);
471 static void mv_qc_prep(struct ata_queued_cmd *qc);
472 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
473 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
474 static void mv_error_handler(struct ata_port *ap);
475 static void mv_eh_freeze(struct ata_port *ap);
476 static void mv_eh_thaw(struct ata_port *ap);
477 static void mv6_dev_config(struct ata_device *dev);
479 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
481 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
482 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
484 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
486 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
487 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
489 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
491 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
492 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
494 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
496 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
497 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
499 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
501 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
502 void __iomem *mmio, unsigned int n_hc);
503 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
505 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
506 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
507 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
508 unsigned int port_no);
509 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
510 void __iomem *port_mmio, int want_ncq);
511 static int __mv_stop_dma(struct ata_port *ap);
513 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
514 * because we have to allow room for worst case splitting of
515 * PRDs for 64K boundaries in mv_fill_sg().
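 *
 * Worked example: a 0x9000-byte segment at bus address 0x1f000 crosses a
 * 64K boundary, so mv_fill_sg() emits it as two ePRDs (0x1000 + 0x8000
 * bytes); since each SG entry can expand to at most two ePRDs, the SCSI
 * layer is only offered MV_MAX_SG_CT / 2 entries.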
517 static struct scsi_host_template mv5_sht = {
518 .module = THIS_MODULE,
520 .ioctl = ata_scsi_ioctl,
521 .queuecommand = ata_scsi_queuecmd,
522 .can_queue = ATA_DEF_QUEUE,
523 .this_id = ATA_SHT_THIS_ID,
524 .sg_tablesize = MV_MAX_SG_CT / 2,
525 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
526 .emulated = ATA_SHT_EMULATED,
528 .proc_name = DRV_NAME,
529 .dma_boundary = MV_DMA_BOUNDARY,
530 .slave_configure = ata_scsi_slave_config,
531 .slave_destroy = ata_scsi_slave_destroy,
532 .bios_param = ata_std_bios_param,
535 static struct scsi_host_template mv6_sht = {
536 .module = THIS_MODULE,
538 .ioctl = ata_scsi_ioctl,
539 .queuecommand = ata_scsi_queuecmd,
540 .change_queue_depth = ata_scsi_change_queue_depth,
541 .can_queue = MV_MAX_Q_DEPTH - 1,
542 .this_id = ATA_SHT_THIS_ID,
543 .sg_tablesize = MV_MAX_SG_CT / 2,
544 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
545 .emulated = ATA_SHT_EMULATED,
547 .proc_name = DRV_NAME,
548 .dma_boundary = MV_DMA_BOUNDARY,
549 .slave_configure = ata_scsi_slave_config,
550 .slave_destroy = ata_scsi_slave_destroy,
551 .bios_param = ata_std_bios_param,
554 static const struct ata_port_operations mv5_ops = {
555 .tf_load = ata_tf_load,
556 .tf_read = ata_tf_read,
557 .check_status = ata_check_status,
558 .exec_command = ata_exec_command,
559 .dev_select = ata_std_dev_select,
561 .cable_detect = ata_cable_sata,
563 .qc_prep = mv_qc_prep,
564 .qc_issue = mv_qc_issue,
565 .data_xfer = ata_data_xfer,
567 .irq_clear = mv_irq_clear,
568 .irq_on = ata_irq_on,
570 .error_handler = mv_error_handler,
571 .freeze = mv_eh_freeze,
574 .scr_read = mv5_scr_read,
575 .scr_write = mv5_scr_write,
577 .port_start = mv_port_start,
578 .port_stop = mv_port_stop,
581 static const struct ata_port_operations mv6_ops = {
582 .dev_config = mv6_dev_config,
583 .tf_load = ata_tf_load,
584 .tf_read = ata_tf_read,
585 .check_status = ata_check_status,
586 .exec_command = ata_exec_command,
587 .dev_select = ata_std_dev_select,
589 .cable_detect = ata_cable_sata,
591 .qc_prep = mv_qc_prep,
592 .qc_issue = mv_qc_issue,
593 .data_xfer = ata_data_xfer,
595 .irq_clear = mv_irq_clear,
596 .irq_on = ata_irq_on,
598 .error_handler = mv_error_handler,
599 .freeze = mv_eh_freeze,
601 .qc_defer = ata_std_qc_defer,
603 .scr_read = mv_scr_read,
604 .scr_write = mv_scr_write,
606 .port_start = mv_port_start,
607 .port_stop = mv_port_stop,
610 static const struct ata_port_operations mv_iie_ops = {
611 .tf_load = ata_tf_load,
612 .tf_read = ata_tf_read,
613 .check_status = ata_check_status,
614 .exec_command = ata_exec_command,
615 .dev_select = ata_std_dev_select,
617 .cable_detect = ata_cable_sata,
619 .qc_prep = mv_qc_prep_iie,
620 .qc_issue = mv_qc_issue,
621 .data_xfer = ata_data_xfer,
623 .irq_clear = mv_irq_clear,
624 .irq_on = ata_irq_on,
626 .error_handler = mv_error_handler,
627 .freeze = mv_eh_freeze,
629 .qc_defer = ata_std_qc_defer,
631 .scr_read = mv_scr_read,
632 .scr_write = mv_scr_write,
634 .port_start = mv_port_start,
635 .port_stop = mv_port_stop,
638 static const struct ata_port_info mv_port_info[] = {
640 .flags = MV_COMMON_FLAGS,
641 .pio_mask = 0x1f, /* pio0-4 */
642 .udma_mask = ATA_UDMA6,
643 .port_ops = &mv5_ops,
646 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
647 .pio_mask = 0x1f, /* pio0-4 */
648 .udma_mask = ATA_UDMA6,
649 .port_ops = &mv5_ops,
652 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
653 .pio_mask = 0x1f, /* pio0-4 */
654 .udma_mask = ATA_UDMA6,
655 .port_ops = &mv5_ops,
658 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
660 .pio_mask = 0x1f, /* pio0-4 */
661 .udma_mask = ATA_UDMA6,
662 .port_ops = &mv6_ops,
665 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
666 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
667 .pio_mask = 0x1f, /* pio0-4 */
668 .udma_mask = ATA_UDMA6,
669 .port_ops = &mv6_ops,
672 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
674 .pio_mask = 0x1f, /* pio0-4 */
675 .udma_mask = ATA_UDMA6,
676 .port_ops = &mv_iie_ops,
679 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
681 .pio_mask = 0x1f, /* pio0-4 */
682 .udma_mask = ATA_UDMA6,
683 .port_ops = &mv_iie_ops,
686 .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
687 .pio_mask = 0x1f, /* pio0-4 */
688 .udma_mask = ATA_UDMA6,
689 .port_ops = &mv_iie_ops,
693 static const struct pci_device_id mv_pci_tbl[] = {
694 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
695 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
696 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
697 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
698 /* RocketRAID 1740/174x have different identifiers */
699 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
700 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
702 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
703 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
704 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
705 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
706 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
708 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
711 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
713 /* Marvell 7042 support */
714 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
716 /* Highpoint RocketRAID PCIe series */
717 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
718 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
720 { } /* terminate list */
723 static const struct mv_hw_ops mv5xxx_ops = {
724 .phy_errata = mv5_phy_errata,
725 .enable_leds = mv5_enable_leds,
726 .read_preamp = mv5_read_preamp,
727 .reset_hc = mv5_reset_hc,
728 .reset_flash = mv5_reset_flash,
729 .reset_bus = mv5_reset_bus,
732 static const struct mv_hw_ops mv6xxx_ops = {
733 .phy_errata = mv6_phy_errata,
734 .enable_leds = mv6_enable_leds,
735 .read_preamp = mv6_read_preamp,
736 .reset_hc = mv6_reset_hc,
737 .reset_flash = mv6_reset_flash,
738 .reset_bus = mv_reset_pci_bus,
741 static const struct mv_hw_ops mv_soc_ops = {
742 .phy_errata = mv6_phy_errata,
743 .enable_leds = mv_soc_enable_leds,
744 .read_preamp = mv_soc_read_preamp,
745 .reset_hc = mv_soc_reset_hc,
746 .reset_flash = mv_soc_reset_flash,
747 .reset_bus = mv_soc_reset_bus,
754 static inline void writelfl(unsigned long data, void __iomem *addr)
757 (void) readl(addr); /* flush to avoid PCI posted write */
760 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
762 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
765 static inline unsigned int mv_hc_from_port(unsigned int port)
767 return port >> MV_PORT_HC_SHIFT;
770 static inline unsigned int mv_hardport_from_port(unsigned int port)
772 return port & MV_PORT_MASK;
775 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
778 return mv_hc_base(base, mv_hc_from_port(port));
781 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
783 return mv_hc_base_from_port(base, port) +
784 MV_SATAHC_ARBTR_REG_SZ +
785 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
788 static inline void __iomem *mv_host_base(struct ata_host *host)
790 struct mv_host_priv *hpriv = host->private_data;
794 static inline void __iomem *mv_ap_base(struct ata_port *ap)
796 return mv_port_base(mv_host_base(ap->host), ap->port_no);
799 static inline int mv_get_hc_count(unsigned long port_flags)
801 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
804 static void mv_irq_clear(struct ata_port *ap)
808 static void mv_set_edma_ptrs(void __iomem *port_mmio,
809 struct mv_host_priv *hpriv,
810 struct mv_port_priv *pp)
815 * initialize request queue
817 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
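/* The 5-bit shift turns the ring index into the byte offset of the CRQB
 * within the 1KB ring (each CRQB is 32 bytes), which is what the hardware
 * expects in the low bits of the IN/OUT pointer registers.
 */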
819 WARN_ON(pp->crqb_dma & 0x3ff);
820 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
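/* (x >> 16) >> 16 rather than x >> 32: a single 32-bit shift would be
 * undefined when dma_addr_t is only 32 bits wide; done this way it simply
 * yields 0 on such configurations.
 */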
821 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
822 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
824 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
825 writelfl((pp->crqb_dma & 0xffffffff) | index,
826 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
828 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
831 * initialize response queue
833 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
835 WARN_ON(pp->crpb_dma & 0xff);
836 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
838 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
839 writelfl((pp->crpb_dma & 0xffffffff) | index,
840 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
842 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
844 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
845 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
849 * mv_start_dma - Enable eDMA engine
850 * @base: port base address
851 * @pp: port private data
853 * Verify the local cache of the eDMA state is accurate with a
857 * Inherited from caller.
859 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
860 struct mv_port_priv *pp, u8 protocol)
862 int want_ncq = (protocol == ATA_PROT_NCQ);
864 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
865 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
866 if (want_ncq != using_ncq)
869 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
870 struct mv_host_priv *hpriv = ap->host->private_data;
871 int hard_port = mv_hardport_from_port(ap->port_no);
872 void __iomem *hc_mmio = mv_hc_base_from_port(
873 ap->host->iomap[MV_PRIMARY_BAR], hard_port);
874 u32 hc_irq_cause, ipending;
876 /* clear EDMA event indicators, if any */
877 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
879 /* clear EDMA interrupt indicator, if any */
880 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
881 ipending = (DEV_IRQ << hard_port) |
882 (CRPB_DMA_DONE << hard_port);
883 if (hc_irq_cause & ipending) {
884 writelfl(hc_irq_cause & ~ipending,
885 hc_mmio + HC_IRQ_CAUSE_OFS);
888 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
890 /* clear FIS IRQ Cause */
891 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
893 mv_set_edma_ptrs(port_mmio, hpriv, pp);
895 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
896 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
898 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
902 * __mv_stop_dma - Disable eDMA engine
903 * @ap: ATA channel to manipulate
905 * Verify the local cache of the eDMA state is accurate with a
909 * Inherited from caller.
911 static int __mv_stop_dma(struct ata_port *ap)
913 void __iomem *port_mmio = mv_ap_base(ap);
914 struct mv_port_priv *pp = ap->private_data;
918 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
919 /* Disable EDMA if active. The disable bit auto clears.
921 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
922 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
924 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
927 /* now properly wait for the eDMA to stop */
928 for (i = 1000; i > 0; i--) {
929 reg = readl(port_mmio + EDMA_CMD_OFS);
930 if (!(reg & EDMA_EN))
937 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
944 static int mv_stop_dma(struct ata_port *ap)
949 spin_lock_irqsave(&ap->host->lock, flags);
950 rc = __mv_stop_dma(ap);
951 spin_unlock_irqrestore(&ap->host->lock, flags);
957 static void mv_dump_mem(void __iomem *start, unsigned bytes)
960 for (b = 0; b < bytes; ) {
961 DPRINTK("%p: ", start + b);
962 for (w = 0; b < bytes && w < 4; w++) {
963 printk("%08x ", readl(start + b));
971 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
976 for (b = 0; b < bytes; ) {
977 DPRINTK("%02x: ", b);
978 for (w = 0; b < bytes && w < 4; w++) {
979 (void) pci_read_config_dword(pdev, b, &dw);
987 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
988 struct pci_dev *pdev)
991 void __iomem *hc_base = mv_hc_base(mmio_base,
992 port >> MV_PORT_HC_SHIFT);
993 void __iomem *port_base;
994 int start_port, num_ports, p, start_hc, num_hcs, hc;
997 start_hc = start_port = 0;
998 num_ports = 8; /* should be benign for 4-port devices */
1001 start_hc = port >> MV_PORT_HC_SHIFT;
1003 num_ports = num_hcs = 1;
1005 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
1006 num_ports > 1 ? num_ports - 1 : start_port);
1009 DPRINTK("PCI config space regs:\n");
1010 mv_dump_pci_cfg(pdev, 0x68);
1012 DPRINTK("PCI regs:\n");
1013 mv_dump_mem(mmio_base+0xc00, 0x3c);
1014 mv_dump_mem(mmio_base+0xd00, 0x34);
1015 mv_dump_mem(mmio_base+0xf00, 0x4);
1016 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1017 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1018 hc_base = mv_hc_base(mmio_base, hc);
1019 DPRINTK("HC regs (HC %i):\n", hc);
1020 mv_dump_mem(hc_base, 0x1c);
1022 for (p = start_port; p < start_port + num_ports; p++) {
1023 port_base = mv_port_base(mmio_base, p);
1024 DPRINTK("EDMA regs (port %i):\n", p);
1025 mv_dump_mem(port_base, 0x54);
1026 DPRINTK("SATA regs (port %i):\n", p);
1027 mv_dump_mem(port_base+0x300, 0x60);
1032 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1036 switch (sc_reg_in) {
1040 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1043 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1052 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1054 unsigned int ofs = mv_scr_offset(sc_reg_in);
1056 if (ofs != 0xffffffffU) {
1057 *val = readl(mv_ap_base(ap) + ofs);
1063 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1065 unsigned int ofs = mv_scr_offset(sc_reg_in);
1067 if (ofs != 0xffffffffU) {
1068 writelfl(val, mv_ap_base(ap) + ofs);
1074 static void mv6_dev_config(struct ata_device *adev)
1077 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1078 * See mv_qc_prep() for more info.
1080 if (adev->flags & ATA_DFLAG_NCQ)
1081 if (adev->max_sectors > ATA_MAX_SECTORS)
1082 adev->max_sectors = ATA_MAX_SECTORS;
1085 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1086 void __iomem *port_mmio, int want_ncq)
1090 /* set up non-NCQ EDMA configuration */
1091 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1093 if (IS_GEN_I(hpriv))
1094 cfg |= (1 << 8); /* enab config burst size mask */
1096 else if (IS_GEN_II(hpriv))
1097 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1099 else if (IS_GEN_IIE(hpriv)) {
1100 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1101 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1102 cfg |= (1 << 18); /* enab early completion */
1103 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1107 cfg |= EDMA_CFG_NCQ;
1108 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1110 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1112 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1115 static void mv_port_free_dma_mem(struct ata_port *ap)
1117 struct mv_host_priv *hpriv = ap->host->private_data;
1118 struct mv_port_priv *pp = ap->private_data;
1122 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1126 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1130 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1131 * For later hardware, we have one unique sg_tbl per NCQ tag.
1133 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1134 if (pp->sg_tbl[tag]) {
1135 if (tag == 0 || !IS_GEN_I(hpriv))
1136 dma_pool_free(hpriv->sg_tbl_pool,
1138 pp->sg_tbl_dma[tag]);
1139 pp->sg_tbl[tag] = NULL;
1145 * mv_port_start - Port specific init/start routine.
1146 * @ap: ATA channel to manipulate
1148 * Allocate and point to DMA memory, init port private memory,
1152 * Inherited from caller.
1154 static int mv_port_start(struct ata_port *ap)
1156 struct device *dev = ap->host->dev;
1157 struct mv_host_priv *hpriv = ap->host->private_data;
1158 struct mv_port_priv *pp;
1159 void __iomem *port_mmio = mv_ap_base(ap);
1160 unsigned long flags;
1163 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1166 ap->private_data = pp;
1168 rc = ata_pad_alloc(ap, dev);
1172 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1175 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1177 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1179 goto out_port_free_dma_mem;
1180 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1183 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1184 * For later hardware, we need one unique sg_tbl per NCQ tag.
1186 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1187 if (tag == 0 || !IS_GEN_I(hpriv)) {
1188 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1189 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1190 if (!pp->sg_tbl[tag])
1191 goto out_port_free_dma_mem;
1193 pp->sg_tbl[tag] = pp->sg_tbl[0];
1194 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1198 spin_lock_irqsave(&ap->host->lock, flags);
1200 mv_edma_cfg(pp, hpriv, port_mmio, 0);
1201 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1203 spin_unlock_irqrestore(&ap->host->lock, flags);
1205 /* Don't turn on EDMA here...do it before DMA commands only. Else
1206 * we'll be unable to send non-data, PIO, etc due to restricted access
1211 out_port_free_dma_mem:
1212 mv_port_free_dma_mem(ap);
1217 * mv_port_stop - Port specific cleanup/stop routine.
1218 * @ap: ATA channel to manipulate
1220 * Stop DMA, cleanup port memory.
1223 * This routine uses the host lock to protect the DMA stop.
1225 static void mv_port_stop(struct ata_port *ap)
1228 mv_port_free_dma_mem(ap);
1232 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1233 * @qc: queued command whose SG list to source from
1235 * Populate the SG list and mark the last entry.
1238 * Inherited from caller.
1240 static void mv_fill_sg(struct ata_queued_cmd *qc)
1242 struct mv_port_priv *pp = qc->ap->private_data;
1243 struct scatterlist *sg;
1244 struct mv_sg *mv_sg, *last_sg = NULL;
1247 mv_sg = pp->sg_tbl[qc->tag];
1248 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1249 dma_addr_t addr = sg_dma_address(sg);
1250 u32 sg_len = sg_dma_len(sg);
1253 u32 offset = addr & 0xffff;
1256 if (offset + sg_len > 0x10000)
1257 len = 0x10000 - offset;
1259 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1260 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1261 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1271 if (likely(last_sg))
1272 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1275 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1277 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1278 (last ? CRQB_CMD_LAST : 0);
1279 *cmdw = cpu_to_le16(tmp);
1283 * mv_qc_prep - Host specific command preparation.
1284 * @qc: queued command to prepare
1286 * This routine simply redirects to the general purpose routine
1287 * if command is not DMA. Else, it handles prep of the CRQB
1288 * (command request block), does some sanity checking, and calls
1289 * the SG load routine.
1292 * Inherited from caller.
1294 static void mv_qc_prep(struct ata_queued_cmd *qc)
1296 struct ata_port *ap = qc->ap;
1297 struct mv_port_priv *pp = ap->private_data;
1299 struct ata_taskfile *tf;
1303 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1304 (qc->tf.protocol != ATA_PROT_NCQ))
1307 /* Fill in command request block
1309 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1310 flags |= CRQB_FLAG_READ;
1311 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1312 flags |= qc->tag << CRQB_TAG_SHIFT;
1314 /* get current queue index from software */
1315 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1317 pp->crqb[in_index].sg_addr =
1318 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1319 pp->crqb[in_index].sg_addr_hi =
1320 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1321 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1323 cw = &pp->crqb[in_index].ata_cmd[0];
1326 /* Sadly, the CRQB cannot accommodate all registers--there are
1327 * only 11 bytes...so we must pick and choose required
1328 * registers based on the command. So, we drop feature and
1329 * hob_feature for [RW] DMA commands, but they are needed for
1330 * NCQ. NCQ will drop hob_nsect.
1332 switch (tf->command) {
1334 case ATA_CMD_READ_EXT:
1336 case ATA_CMD_WRITE_EXT:
1337 case ATA_CMD_WRITE_FUA_EXT:
1338 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1340 case ATA_CMD_FPDMA_READ:
1341 case ATA_CMD_FPDMA_WRITE:
1342 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1343 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1346 /* The only other commands EDMA supports in non-queued and
1347 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1348 * of which are defined/used by Linux. If we get here, this
1349 * driver needs work.
1351 * FIXME: modify libata to give qc_prep a return value and
1352 * return error here.
1354 BUG_ON(tf->command);
1357 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1358 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1359 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1360 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1361 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1362 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1363 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1364 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1365 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1367 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1373 * mv_qc_prep_iie - Host specific command preparation.
1374 * @qc: queued command to prepare
1376 * This routine simply redirects to the general purpose routine
1377 * if command is not DMA. Else, it handles prep of the CRQB
1378 * (command request block), does some sanity checking, and calls
1379 * the SG load routine.
1382 * Inherited from caller.
1384 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1386 struct ata_port *ap = qc->ap;
1387 struct mv_port_priv *pp = ap->private_data;
1388 struct mv_crqb_iie *crqb;
1389 struct ata_taskfile *tf;
1393 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1394 (qc->tf.protocol != ATA_PROT_NCQ))
1397 /* Fill in Gen IIE command request block
1399 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1400 flags |= CRQB_FLAG_READ;
1402 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1403 flags |= qc->tag << CRQB_TAG_SHIFT;
1404 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1406 /* get current queue index from software */
1407 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1409 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1410 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1411 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1412 crqb->flags = cpu_to_le32(flags);
1415 crqb->ata_cmd[0] = cpu_to_le32(
1416 (tf->command << 16) |
1419 crqb->ata_cmd[1] = cpu_to_le32(
1425 crqb->ata_cmd[2] = cpu_to_le32(
1426 (tf->hob_lbal << 0) |
1427 (tf->hob_lbam << 8) |
1428 (tf->hob_lbah << 16) |
1429 (tf->hob_feature << 24)
1431 crqb->ata_cmd[3] = cpu_to_le32(
1433 (tf->hob_nsect << 8)
1436 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1442 * mv_qc_issue - Initiate a command to the host
1443 * @qc: queued command to start
1445 * This routine simply redirects to the general purpose routine
1446 * if command is not DMA. Else, it sanity checks our local
1447 * caches of the request producer/consumer indices then enables
1448 * DMA and bumps the request producer index.
1451 * Inherited from caller.
1453 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1455 struct ata_port *ap = qc->ap;
1456 void __iomem *port_mmio = mv_ap_base(ap);
1457 struct mv_port_priv *pp = ap->private_data;
1460 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1461 (qc->tf.protocol != ATA_PROT_NCQ)) {
1462 /* We're about to send a non-EDMA capable command to the
1463 * port. Turn off EDMA so there won't be problems accessing
1464 * shadow block, etc registers.
1467 return ata_qc_issue_prot(qc);
1470 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1474 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1476 /* and write the request in pointer to kick the EDMA to life */
1477 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1478 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1484 * mv_err_intr - Handle error interrupts on the port
1485 * @ap: ATA channel to manipulate
1486 * @qc: affected queued command, if any (may be NULL)
1488 * In most cases, just clear the interrupt and move on. However,
1489 * some cases require an eDMA reset, which is done right before
1490 * the COMRESET in mv_phy_reset(). The SERR case requires a
1491 * clear of pending errors in the SATA SERROR register. Finally,
1492 * if the port disabled DMA, update our cached copy to match.
1495 * Inherited from caller.
1497 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1499 void __iomem *port_mmio = mv_ap_base(ap);
1500 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1501 struct mv_port_priv *pp = ap->private_data;
1502 struct mv_host_priv *hpriv = ap->host->private_data;
1503 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1504 unsigned int action = 0, err_mask = 0;
1505 struct ata_eh_info *ehi = &ap->link.eh_info;
1507 ata_ehi_clear_desc(ehi);
1509 if (!edma_enabled) {
1510 /* just a guess: do we need to do this? should we
1511 * expand this, and do it in all cases?
1513 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1514 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1517 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1519 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1522 * all generations share these EDMA error cause bits
1525 if (edma_err_cause & EDMA_ERR_DEV)
1526 err_mask |= AC_ERR_DEV;
1527 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1528 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1529 EDMA_ERR_INTRL_PAR)) {
1530 err_mask |= AC_ERR_ATA_BUS;
1531 action |= ATA_EH_HARDRESET;
1532 ata_ehi_push_desc(ehi, "parity error");
1534 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1535 ata_ehi_hotplugged(ehi);
1536 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1537 "dev disconnect" : "dev connect");
1538 action |= ATA_EH_HARDRESET;
1541 if (IS_GEN_I(hpriv)) {
1542 eh_freeze_mask = EDMA_EH_FREEZE_5;
1544 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1545 struct mv_port_priv *pp = ap->private_data;
1546 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1547 ata_ehi_push_desc(ehi, "EDMA self-disable");
1550 eh_freeze_mask = EDMA_EH_FREEZE;
1552 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1553 struct mv_port_priv *pp = ap->private_data;
1554 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1555 ata_ehi_push_desc(ehi, "EDMA self-disable");
1558 if (edma_err_cause & EDMA_ERR_SERR) {
1559 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1560 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1561 err_mask = AC_ERR_ATA_BUS;
1562 action |= ATA_EH_HARDRESET;
1566 /* Clear EDMA now that SERR cleanup done */
1567 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1570 err_mask = AC_ERR_OTHER;
1571 action |= ATA_EH_HARDRESET;
1574 ehi->serror |= serr;
1575 ehi->action |= action;
1578 qc->err_mask |= err_mask;
1580 ehi->err_mask |= err_mask;
1582 if (edma_err_cause & eh_freeze_mask)
1583 ata_port_freeze(ap);
1588 static void mv_intr_pio(struct ata_port *ap)
1590 struct ata_queued_cmd *qc;
1593 /* ignore spurious intr if drive still BUSY */
1594 ata_status = readb(ap->ioaddr.status_addr);
1595 if (unlikely(ata_status & ATA_BUSY))
1598 /* get active ATA command */
1599 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1600 if (unlikely(!qc)) /* no active tag */
1602 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1605 /* and finally, complete the ATA command */
1606 qc->err_mask |= ac_err_mask(ata_status);
1607 ata_qc_complete(qc);
1610 static void mv_intr_edma(struct ata_port *ap)
1612 void __iomem *port_mmio = mv_ap_base(ap);
1613 struct mv_host_priv *hpriv = ap->host->private_data;
1614 struct mv_port_priv *pp = ap->private_data;
1615 struct ata_queued_cmd *qc;
1616 u32 out_index, in_index;
1617 bool work_done = false;
1619 /* get h/w response queue pointer */
1620 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1621 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1627 /* get s/w response queue last-read pointer, and compare */
1628 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1629 if (in_index == out_index)
1632 /* 50xx: get active ATA command */
1633 if (IS_GEN_I(hpriv))
1634 tag = ap->link.active_tag;
1636 /* Gen II/IIE: get active ATA command via tag, to enable
1637 * support for queueing. This works transparently for
1638 * queued and non-queued modes.
1641 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1643 qc = ata_qc_from_tag(ap, tag);
1645 /* For non-NCQ mode, the lower 8 bits of status
1646 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1647 * which should be zero if all went well.
1649 status = le16_to_cpu(pp->crpb[out_index].flags);
1650 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1651 mv_err_intr(ap, qc);
1655 /* and finally, complete the ATA command */
1658 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1659 ata_qc_complete(qc);
1662 /* advance software response queue pointer, to
1663 * indicate (after the loop completes) to hardware
1664 * that we have consumed a response queue entry.
1671 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1672 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1673 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1677 * mv_host_intr - Handle all interrupts on the given host controller
1678 * @host: host specific structure
1679 * @relevant: port error bits relevant to this host controller
1680 * @hc: which host controller we're to look at
1682 * Read then write clear the HC interrupt status then walk each
1683 * port connected to the HC and see if it needs servicing. Port
1684 * success ints are reported in the HC interrupt status reg, the
1685 * port error ints are reported in the higher level main
1686 * interrupt status register and thus are passed in via the
1687 * 'relevant' argument.
1690 * Inherited from caller.
1692 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1694 struct mv_host_priv *hpriv = host->private_data;
1695 void __iomem *mmio = hpriv->base;
1696 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1698 int port, port0, last_port;
1703 port0 = MV_PORTS_PER_HC;
1706 last_port = port0 + MV_PORTS_PER_HC;
1708 last_port = port0 + hpriv->n_ports;
1709 /* we'll need the HC success int register in most cases */
1710 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1714 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1716 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1717 hc, relevant, hc_irq_cause);
1719 for (port = port0; port < last_port; port++) {
1720 struct ata_port *ap = host->ports[port];
1721 struct mv_port_priv *pp = ap->private_data;
1722 int have_err_bits, hard_port, shift;
1724 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1727 shift = port << 1; /* (port * 2) */
1728 if (port >= MV_PORTS_PER_HC) {
1729 shift++; /* skip bit 8 in the HC Main IRQ reg */
1731 have_err_bits = ((PORT0_ERR << shift) & relevant);
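/* e.g. port 5: shift == 11 (5 * 2, plus one to step over the
 * PORTS_0_3_COAL_DONE bit at position 8), so this tests bit 11
 * of 'relevant'.
 */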
1733 if (unlikely(have_err_bits)) {
1734 struct ata_queued_cmd *qc;
1736 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1737 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1740 mv_err_intr(ap, qc);
1744 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1746 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1747 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1750 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1757 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1759 struct mv_host_priv *hpriv = host->private_data;
1760 struct ata_port *ap;
1761 struct ata_queued_cmd *qc;
1762 struct ata_eh_info *ehi;
1763 unsigned int i, err_mask, printed = 0;
1766 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1768 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1771 DPRINTK("All regs @ PCI error\n");
1772 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1774 writelfl(0, mmio + hpriv->irq_cause_ofs);
1776 for (i = 0; i < host->n_ports; i++) {
1777 ap = host->ports[i];
1778 if (!ata_link_offline(&ap->link)) {
1779 ehi = &ap->link.eh_info;
1780 ata_ehi_clear_desc(ehi);
1782 ata_ehi_push_desc(ehi,
1783 "PCI err cause 0x%08x", err_cause);
1784 err_mask = AC_ERR_HOST_BUS;
1785 ehi->action = ATA_EH_HARDRESET;
1786 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1788 qc->err_mask |= err_mask;
1790 ehi->err_mask |= err_mask;
1792 ata_port_freeze(ap);
1798 * mv_interrupt - Main interrupt event handler
1800 * @dev_instance: private data; in this case the host structure
1802 * Read the read only register to determine if any host
1803 * controllers have pending interrupts. If so, call lower level
1804 * routine to handle. Also check for PCI errors which are only
1808 * This routine holds the host lock while processing pending
1811 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1813 struct ata_host *host = dev_instance;
1814 struct mv_host_priv *hpriv = host->private_data;
1815 unsigned int hc, handled = 0, n_hcs;
1816 void __iomem *mmio = hpriv->base;
1817 u32 irq_stat, irq_mask;
1819 spin_lock(&host->lock);
1821 irq_stat = readl(hpriv->main_cause_reg_addr);
1822 irq_mask = readl(hpriv->main_mask_reg_addr);
1824 /* check the cases where we either have nothing pending or have read
1825 * a bogus register value which can indicate HW removal or PCI fault
1827 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1830 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1832 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1833 mv_pci_error(host, mmio);
1835 goto out_unlock; /* skip all other HC irq handling */
1838 for (hc = 0; hc < n_hcs; hc++) {
1839 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
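/* For hc == 1 this selects bits 17:9 of the main cause register
 * (HC0_IRQ_PEND == 0x1ff shifted by HC_SHIFT == 9), i.e. HC1's
 * per-port error/done bits.
 */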
1841 mv_host_intr(host, relevant, hc);
1847 spin_unlock(&host->lock);
1849 return IRQ_RETVAL(handled);
1852 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1854 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1855 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1857 return hc_mmio + ofs;
1860 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1864 switch (sc_reg_in) {
1868 ofs = sc_reg_in * sizeof(u32);
1877 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1879 struct mv_host_priv *hpriv = ap->host->private_data;
1880 void __iomem *mmio = hpriv->base;
1881 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1882 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1884 if (ofs != 0xffffffffU) {
1885 *val = readl(addr + ofs);
1891 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1893 struct mv_host_priv *hpriv = ap->host->private_data;
1894 void __iomem *mmio = hpriv->base;
1895 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1896 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1898 if (ofs != 0xffffffffU) {
1899 writelfl(val, addr + ofs);
1905 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1907 struct pci_dev *pdev = to_pci_dev(host->dev);
1910 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1913 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1915 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1918 mv_reset_pci_bus(host, mmio);
1921 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1923 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1926 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1929 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1932 tmp = readl(phy_mmio + MV5_PHY_MODE);
1934 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1935 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1938 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1942 writel(0, mmio + MV_GPIO_PORT_CTL);
1944 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1946 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1948 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1951 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1954 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1955 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1957 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1960 tmp = readl(phy_mmio + MV5_LT_MODE);
1962 writel(tmp, phy_mmio + MV5_LT_MODE);
1964 tmp = readl(phy_mmio + MV5_PHY_CTL);
1967 writel(tmp, phy_mmio + MV5_PHY_CTL);
1970 tmp = readl(phy_mmio + MV5_PHY_MODE);
1972 tmp |= hpriv->signal[port].pre;
1973 tmp |= hpriv->signal[port].amps;
1974 writel(tmp, phy_mmio + MV5_PHY_MODE);
1979 #define ZERO(reg) writel(0, port_mmio + (reg))
1980 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1983 void __iomem *port_mmio = mv_port_base(mmio, port);
1985 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1987 mv_channel_reset(hpriv, mmio, port);
1989 ZERO(0x028); /* command */
1990 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1991 ZERO(0x004); /* timer */
1992 ZERO(0x008); /* irq err cause */
1993 ZERO(0x00c); /* irq err mask */
1994 ZERO(0x010); /* rq bah */
1995 ZERO(0x014); /* rq inp */
1996 ZERO(0x018); /* rq outp */
1997 ZERO(0x01c); /* respq bah */
1998 ZERO(0x024); /* respq outp */
1999 ZERO(0x020); /* respq inp */
2000 ZERO(0x02c); /* test control */
2001 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2005 #define ZERO(reg) writel(0, hc_mmio + (reg))
2006 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2009 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2017 tmp = readl(hc_mmio + 0x20);
2020 writel(tmp, hc_mmio + 0x20);
2024 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2027 unsigned int hc, port;
2029 for (hc = 0; hc < n_hc; hc++) {
2030 for (port = 0; port < MV_PORTS_PER_HC; port++)
2031 mv5_reset_hc_port(hpriv, mmio,
2032 (hc * MV_PORTS_PER_HC) + port);
2034 mv5_reset_one_hc(hpriv, mmio, hc);
2041 #define ZERO(reg) writel(0, mmio + (reg))
2042 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2044 struct mv_host_priv *hpriv = host->private_data;
2047 tmp = readl(mmio + MV_PCI_MODE);
2049 writel(tmp, mmio + MV_PCI_MODE);
2051 ZERO(MV_PCI_DISC_TIMER);
2052 ZERO(MV_PCI_MSI_TRIGGER);
2053 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2054 ZERO(HC_MAIN_IRQ_MASK_OFS);
2055 ZERO(MV_PCI_SERR_MASK);
2056 ZERO(hpriv->irq_cause_ofs);
2057 ZERO(hpriv->irq_mask_ofs);
2058 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2059 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2060 ZERO(MV_PCI_ERR_ATTRIBUTE);
2061 ZERO(MV_PCI_ERR_COMMAND);
2065 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2069 mv5_reset_flash(hpriv, mmio);
2071 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2073 tmp |= (1 << 5) | (1 << 6);
2074 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2078 * mv6_reset_hc - Perform the 6xxx global soft reset
2079 * @mmio: base address of the HBA
2081 * This routine only applies to 6xxx parts.
2084 * Inherited from caller.
2086 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2089 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2093 /* Following procedure defined in PCI "main command and status
2097 writel(t | STOP_PCI_MASTER, reg);
2099 for (i = 0; i < 1000; i++) {
2102 if (PCI_MASTER_EMPTY & t)
2105 if (!(PCI_MASTER_EMPTY & t)) {
2106 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2114 writel(t | GLOB_SFT_RST, reg);
2117 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2119 if (!(GLOB_SFT_RST & t)) {
2120 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2125 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2128 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2131 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2133 if (GLOB_SFT_RST & t) {
2134 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2141 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2144 void __iomem *port_mmio;
2147 tmp = readl(mmio + MV_RESET_CFG);
2148 if ((tmp & (1 << 0)) == 0) {
2149 hpriv->signal[idx].amps = 0x7 << 8;
2150 hpriv->signal[idx].pre = 0x1 << 5;
2154 port_mmio = mv_port_base(mmio, idx);
2155 tmp = readl(port_mmio + PHY_MODE2);
2157 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2158 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2161 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2163 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2166 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2169 void __iomem *port_mmio = mv_port_base(mmio, port);
2171 u32 hp_flags = hpriv->hp_flags;
2173 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2175 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2178 if (fix_phy_mode2) {
2179 m2 = readl(port_mmio + PHY_MODE2);
2182 writel(m2, port_mmio + PHY_MODE2);
2186 m2 = readl(port_mmio + PHY_MODE2);
2187 m2 &= ~((1 << 16) | (1 << 31));
2188 writel(m2, port_mmio + PHY_MODE2);
2193 /* who knows what this magic does */
2194 tmp = readl(port_mmio + PHY_MODE3);
2197 writel(tmp, port_mmio + PHY_MODE3);
2199 if (fix_phy_mode4) {
2202 m4 = readl(port_mmio + PHY_MODE4);
2204 if (hp_flags & MV_HP_ERRATA_60X1B2)
2205 tmp = readl(port_mmio + 0x310);
2207 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2209 writel(m4, port_mmio + PHY_MODE4);
2211 if (hp_flags & MV_HP_ERRATA_60X1B2)
2212 writel(tmp, port_mmio + 0x310);
2215 /* Revert values of pre-emphasis and signal amps to the saved ones */
2216 m2 = readl(port_mmio + PHY_MODE2);
2218 m2 &= ~MV_M2_PREAMP_MASK;
2219 m2 |= hpriv->signal[port].amps;
2220 m2 |= hpriv->signal[port].pre;
2223 /* according to mvSata 3.6.1, some IIE values are fixed */
2224 if (IS_GEN_IIE(hpriv)) {
2229 writel(m2, port_mmio + PHY_MODE2);
2232 /* TODO: use the generic LED interface to configure the SATA Presence */
2233 /* & Activity LEDs on the board */
2234 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2240 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2243 void __iomem *port_mmio;
2246 port_mmio = mv_port_base(mmio, idx);
2247 tmp = readl(port_mmio + PHY_MODE2);
2249 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2250 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2254 #define ZERO(reg) writel(0, port_mmio + (reg))
2255 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2256 void __iomem *mmio, unsigned int port)
2258 void __iomem *port_mmio = mv_port_base(mmio, port);
2260 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
2262 mv_channel_reset(hpriv, mmio, port);
2264 ZERO(0x028); /* command */
2265 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2266 ZERO(0x004); /* timer */
2267 ZERO(0x008); /* irq err cause */
2268 ZERO(0x00c); /* irq err mask */
2269 ZERO(0x010); /* rq bah */
2270 ZERO(0x014); /* rq inp */
2271 ZERO(0x018); /* rq outp */
2272 ZERO(0x01c); /* respq bah */
2273 ZERO(0x024); /* respq outp */
2274 ZERO(0x020); /* respq inp */
2275 ZERO(0x02c); /* test control */
2276 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2281 #define ZERO(reg) writel(0, hc_mmio + (reg))
2282 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2285 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2295 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2296 void __iomem *mmio, unsigned int n_hc)
2300 for (port = 0; port < hpriv->n_ports; port++)
2301 mv_soc_reset_hc_port(hpriv, mmio, port);
2303 mv_soc_reset_one_hc(hpriv, mmio);
2308 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2314 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2319 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2320 unsigned int port_no)
2322 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2324 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2326 if (IS_GEN_II(hpriv)) {
2327 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2328 ifctl |= (1 << 7); /* enable gen2i speed */
2329 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2330 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2333 udelay(25); /* allow reset propagation */
2335 /* Spec never mentions clearing the bit. Marvell's driver does
2336 * clear the bit, however.
2338 writelfl(0, port_mmio + EDMA_CMD_OFS);
2340 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2342 if (IS_GEN_I(hpriv))
2347 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2348 * @ap: ATA channel to manipulate
2350 * Part of this is taken from __sata_phy_reset and modified to
2351 * not sleep since this routine gets called from interrupt level.
2354 * Inherited from caller. This is coded to be safe to call at
2355 * interrupt level, i.e. it does not sleep.
2357 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2358 unsigned long deadline)
2360 struct mv_port_priv *pp = ap->private_data;
2361 struct mv_host_priv *hpriv = ap->host->private_data;
2362 void __iomem *port_mmio = mv_ap_base(ap);
2366 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2370 u32 sstatus, serror, scontrol;
2372 mv_scr_read(ap, SCR_STATUS, &sstatus);
2373 mv_scr_read(ap, SCR_ERROR, &serror);
2374 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2375 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2376 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2380 /* Issue COMRESET via SControl */
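/* Writing DET=1 (SControl 0x301) requests the interface initialization
* (COMRESET) sequence; writing DET=0 (0x300) then releases it, with the
* IPM field left at 3 so transitions to the partial/slumber power states
* stay disabled. This is standard SATA SControl usage.
*/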
2382 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2385 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2389 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2390 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2394 } while (time_before(jiffies, deadline));
2396 /* work around errata */
2397 if (IS_GEN_II(hpriv) &&
2398 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2400 goto comreset_retry;
2404 u32 sstatus, serror, scontrol;
2406 mv_scr_read(ap, SCR_STATUS, &sstatus);
2407 mv_scr_read(ap, SCR_ERROR, &serror);
2408 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2409 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2410 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2414 if (ata_link_offline(&ap->link)) {
2415 *class = ATA_DEV_NONE;
2419 /* even after SStatus reflects that the device is ready,
2420 * it seems to take a while for the link to be fully
2421 * established (and thus Status no longer 0x80/0x7F),
2422 * so we poll a bit for that here.
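* (0x80 means the BSY bit is still set; 0x7f is the value typically read
* from a floating, undriven taskfile, so neither indicates a usable
* device status yet.)
*/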
2426 u8 drv_stat = ata_check_status(ap);
2427 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2432 if (time_after(jiffies, deadline))
2436 /* FIXME: if we passed the deadline, the following
2437 * code probably produces an invalid result
2440 /* finally, read device signature from TF registers */
2441 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2443 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2445 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2450 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2452 struct ata_port *ap = link->ap;
2453 struct mv_port_priv *pp = ap->private_data;
2454 struct ata_eh_context *ehc = &link->eh_context;
2457 rc = mv_stop_dma(ap);
2459 ehc->i.action |= ATA_EH_HARDRESET;
2461 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2462 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2463 ehc->i.action |= ATA_EH_HARDRESET;
2466 /* if we're about to do hardreset, nothing more to do */
2467 if (ehc->i.action & ATA_EH_HARDRESET)
2470 if (ata_link_online(link))
2471 rc = ata_wait_ready(ap, deadline);
2478 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2479 unsigned long deadline)
2481 struct ata_port *ap = link->ap;
2482 struct mv_host_priv *hpriv = ap->host->private_data;
2483 void __iomem *mmio = hpriv->base;
2487 mv_channel_reset(hpriv, mmio, ap->port_no);
2489 mv_phy_reset(ap, class, deadline);
2494 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2496 struct ata_port *ap = link->ap;
2499 /* print link status */
2500 sata_print_link_status(link);
2503 sata_scr_read(link, SCR_ERROR, &serr);
2504 sata_scr_write_flush(link, SCR_ERROR, serr);
2506 /* bail out if no device is present */
2507 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2508 DPRINTK("EXIT, no device\n");
2512 /* set up device control */
2513 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2516 static void mv_error_handler(struct ata_port *ap)
2518 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2519 mv_hardreset, mv_postreset);
2522 static void mv_eh_freeze(struct ata_port *ap)
2524 struct mv_host_priv *hpriv = ap->host->private_data;
2525 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2529 /* FIXME: handle coalescing completion events properly */
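/* Each port owns an err/done bit pair in the main IRQ mask (two bits per
* port, starting at bit 0); the ports behind the second host controller
* sit one extra bit higher because a coalescing-done bit separates the
* two groups, which is what the shift arithmetic below accounts for.
*/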
2531 shift = ap->port_no * 2;
2535 mask = 0x3 << shift;
2537 /* disable assertion of portN err, done events */
2538 tmp = readl(hpriv->main_mask_reg_addr);
2539 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
2542 static void mv_eh_thaw(struct ata_port *ap)
2544 struct mv_host_priv *hpriv = ap->host->private_data;
2545 void __iomem *mmio = hpriv->base;
2546 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2547 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2548 void __iomem *port_mmio = mv_ap_base(ap);
2549 u32 tmp, mask, hc_irq_cause;
2550 unsigned int shift, hc_port_no = ap->port_no;
2552 /* FIXME: handle coalescing completion events properly */
2554 shift = ap->port_no * 2;
2560 mask = 0x3 << shift;
2562 /* clear EDMA errors on this port */
2563 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2565 /* clear pending irq events */
2566 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2567 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2568 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2569 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2571 /* enable assertion of portN err, done events */
2572 tmp = readl(hpriv->main_mask_reg_addr);
2573 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2577 * mv_port_init - Perform some early initialization on a single port.
2578 * @port: libata data structure storing shadow register addresses
2579 * @port_mmio: base address of the port
2581 * Initialize shadow register mmio addresses, clear outstanding
2582 * interrupts on the port, and unmask interrupts for the future
2583 * start of the port.
2586 * Inherited from caller.
2588 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2590 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2593 /* PIO related setup
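* Each taskfile shadow register occupies its own 32-bit slot in the
* port's SHD block, so the libata shadow addresses below are simply
* shd_base + 4 * ATA_REG_<x>.
*/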
2595 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2597 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2598 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2599 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2600 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2601 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2602 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2604 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2605 /* special case: control/altstatus doesn't have an ATA_REG_ address */
2606 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2609 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2611 /* Clear any currently outstanding port interrupt conditions */
2612 serr_ofs = mv_scr_offset(SCR_ERROR);
2613 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2614 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2616 /* unmask all non-transient EDMA error interrupts */
2617 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2619 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2620 readl(port_mmio + EDMA_CFG_OFS),
2621 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2622 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2625 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2627 struct pci_dev *pdev = to_pci_dev(host->dev);
2628 struct mv_host_priv *hpriv = host->private_data;
2629 u32 hp_flags = hpriv->hp_flags;
2631 switch (board_idx) {
2633 hpriv->ops = &mv5xxx_ops;
2634 hp_flags |= MV_HP_GEN_I;
2636 switch (pdev->revision) {
2638 hp_flags |= MV_HP_ERRATA_50XXB0;
2641 hp_flags |= MV_HP_ERRATA_50XXB2;
2644 dev_printk(KERN_WARNING, &pdev->dev,
2645 "Applying 50XXB2 workarounds to unknown rev\n");
2646 hp_flags |= MV_HP_ERRATA_50XXB2;
2653 hpriv->ops = &mv5xxx_ops;
2654 hp_flags |= MV_HP_GEN_I;
2656 switch (pdev->revision) {
2658 hp_flags |= MV_HP_ERRATA_50XXB0;
2661 hp_flags |= MV_HP_ERRATA_50XXB2;
2664 dev_printk(KERN_WARNING, &pdev->dev,
2665 "Applying B2 workarounds to unknown rev\n");
2666 hp_flags |= MV_HP_ERRATA_50XXB2;
2673 hpriv->ops = &mv6xxx_ops;
2674 hp_flags |= MV_HP_GEN_II;
2676 switch (pdev->revision) {
2678 hp_flags |= MV_HP_ERRATA_60X1B2;
2681 hp_flags |= MV_HP_ERRATA_60X1C0;
2684 dev_printk(KERN_WARNING, &pdev->dev,
2685 "Applying B2 workarounds to unknown rev\n");
2686 hp_flags |= MV_HP_ERRATA_60X1B2;
2692 hp_flags |= MV_HP_PCIE;
2693 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2694 (pdev->device == 0x2300 || pdev->device == 0x2310))
2697 * Highpoint RocketRAID PCIe 23xx series cards:
2699 * Unconfigured drives are treated as "Legacy"
2700 * by the BIOS, and it overwrites sector 8 with
2701 * a "Lgcy" metadata block prior to Linux boot.
2703 * Configured drives (RAID or JBOD) leave sector 8
2704 * alone, but instead overwrite a high numbered
2705 * sector for the RAID metadata. This sector can
2706 * be determined exactly, by truncating the physical
2707 * drive capacity to a nice even GB value.
2709 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
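* (i.e. the capacity rounded down to a multiple of 0x100000 sectors;
* at 512 bytes/sector that is a 512 MiB boundary. For example, a drive
* reporting 976773168 sectors would have its metadata start at sector
* 976224256. The sector count here is only an illustrative value.)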
2711 * Warn the user, lest they think we're just buggy.
2713 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2714 " BIOS CORRUPTS DATA on all attached drives,"
2715 " regardless of if/how they are configured."
2717 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2718 " use sectors 8-9 on \"Legacy\" drives,"
2719 " and avoid the final two gigabytes on"
2720 " all RocketRAID BIOS initialized drives.\n");
2723 hpriv->ops = &mv6xxx_ops;
2724 hp_flags |= MV_HP_GEN_IIE;
2726 switch (pdev->revision) {
2728 hp_flags |= MV_HP_ERRATA_XX42A0;
2731 hp_flags |= MV_HP_ERRATA_60X1C0;
2734 dev_printk(KERN_WARNING, &pdev->dev,
2735 "Applying 60X1C0 workarounds to unknown rev\n");
2736 hp_flags |= MV_HP_ERRATA_60X1C0;
2741 hpriv->ops = &mv_soc_ops;
2742 hp_flags |= MV_HP_ERRATA_60X1C0;
2746 dev_printk(KERN_ERR, host->dev,
2747 "BUG: invalid board index %u\n", board_idx);
2751 hpriv->hp_flags = hp_flags;
2752 if (hp_flags & MV_HP_PCIE) {
2753 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2754 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2755 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2757 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2758 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2759 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2766 * mv_init_host - Perform some early initialization of the host.
2767 * @host: ATA host to initialize
2768 * @board_idx: controller index
2770 * If possible, do an early global reset of the host. Then do
2771 * our port init and clear/unmask all/relevant host interrupts.
2774 * Inherited from caller.
2776 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2778 int rc = 0, n_hc, port, hc;
2779 struct mv_host_priv *hpriv = host->private_data;
2780 void __iomem *mmio = hpriv->base;
2782 rc = mv_chip_id(host, board_idx);
2786 if (HAS_PCI(host)) {
2787 hpriv->main_cause_reg_addr = hpriv->base +
2788 HC_MAIN_IRQ_CAUSE_OFS;
2789 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2791 hpriv->main_cause_reg_addr = hpriv->base +
2792 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2793 hpriv->main_mask_reg_addr = hpriv->base +
2794 HC_SOC_MAIN_IRQ_MASK_OFS;
2796 /* global interrupt mask */
2797 writel(0, hpriv->main_mask_reg_addr);
2799 n_hc = mv_get_hc_count(host->ports[0]->flags);
2801 for (port = 0; port < host->n_ports; port++)
2802 hpriv->ops->read_preamp(hpriv, port, mmio);
2804 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2808 hpriv->ops->reset_flash(hpriv, mmio);
2809 hpriv->ops->reset_bus(host, mmio);
2810 hpriv->ops->enable_leds(hpriv, mmio);
2812 for (port = 0; port < host->n_ports; port++) {
2813 if (IS_GEN_II(hpriv)) {
2814 void __iomem *port_mmio = mv_port_base(mmio, port);
2816 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2817 ifctl |= (1 << 7); /* enable gen2i speed */
2818 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2819 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2822 hpriv->ops->phy_errata(hpriv, mmio, port);
2825 for (port = 0; port < host->n_ports; port++) {
2826 struct ata_port *ap = host->ports[port];
2827 void __iomem *port_mmio = mv_port_base(mmio, port);
2829 mv_port_init(&ap->ioaddr, port_mmio);
2832 if (HAS_PCI(host)) {
2833 unsigned int offset = port_mmio - mmio;
2834 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2835 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2840 for (hc = 0; hc < n_hc; hc++) {
2841 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2843 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2844 "(before clear)=0x%08x\n", hc,
2845 readl(hc_mmio + HC_CFG_OFS),
2846 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2848 /* Clear any currently outstanding hc interrupt conditions */
2849 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2852 if (HAS_PCI(host)) {
2853 /* Clear any currently outstanding host interrupt conditions */
2854 writelfl(0, mmio + hpriv->irq_cause_ofs);
2856 /* and unmask interrupt generation for host regs */
2857 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2858 if (IS_GEN_I(hpriv))
2859 writelfl(~HC_MAIN_MASKED_IRQS_5,
2860 hpriv->main_mask_reg_addr);
2862 writelfl(~HC_MAIN_MASKED_IRQS,
2863 hpriv->main_mask_reg_addr);
2865 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2866 "PCI int cause/mask=0x%08x/0x%08x\n",
2867 readl(hpriv->main_cause_reg_addr),
2868 readl(hpriv->main_mask_reg_addr),
2869 readl(mmio + hpriv->irq_cause_ofs),
2870 readl(mmio + hpriv->irq_mask_ofs));
2872 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2873 hpriv->main_mask_reg_addr);
2874 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2875 readl(hpriv->main_cause_reg_addr),
2876 readl(hpriv->main_mask_reg_addr));
2883 * mv_platform_probe - handle a positive probe of an SoC Marvell
2885 * @pdev: platform device found
2888 * Inherited from caller.
2890 static int mv_platform_probe(struct platform_device *pdev)
2892 static int printed_version;
2893 const struct mv_sata_platform_data *mv_platform_data;
2894 const struct ata_port_info *ppi[] =
2895 { &mv_port_info[chip_soc], NULL };
2896 struct ata_host *host;
2897 struct mv_host_priv *hpriv;
2898 struct resource *res;
2901 if (!printed_version++)
2902 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2905 * Simple resource validation ..
2907 if (unlikely(pdev->num_resources != 2)) {
2908 dev_err(&pdev->dev, "invalid number of resources\n");
2913 * Get the register base first
2915 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2920 mv_platform_data = pdev->dev.platform_data;
2921 n_ports = mv_platform_data->n_ports;
2923 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2924 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2926 if (!host || !hpriv)
2928 host->private_data = hpriv;
2929 hpriv->n_ports = n_ports;
2932 hpriv->base = ioremap(res->start, res->end - res->start + 1);
2933 hpriv->base -= MV_SATAHC0_REG_BASE;
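/* The platform resource describes only the SATAHC register block, while
* the rest of the driver computes offsets from the start of the full chip
* register window, so the mapped base is biased down by MV_SATAHC0_REG_BASE
* to keep those offset helpers unchanged (rationale inferred from the code).
*/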
2935 /* initialize adapter */
2936 rc = mv_init_host(host, chip_soc);
2940 dev_printk(KERN_INFO, &pdev->dev,
2941 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2944 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2945 IRQF_SHARED, &mv6_sht);
2950 * mv_platform_remove - unplug a platform interface
2951 * @pdev: platform device
2953 * A platform bus SATA device has been unplugged. Perform the needed
2954 * cleanup. Also called on module unload for any active devices.
2956 static int __devexit mv_platform_remove(struct platform_device *pdev)
2958 struct device *dev = &pdev->dev;
2959 struct ata_host *host = dev_get_drvdata(dev);
2960 struct mv_host_priv *hpriv = host->private_data;
2961 void __iomem *base = hpriv->base;
2963 ata_host_detach(host);
2968 static struct platform_driver mv_platform_driver = {
2969 .probe = mv_platform_probe,
2970 .remove = __devexit_p(mv_platform_remove),
2973 .owner = THIS_MODULE,
2979 static int mv_pci_init_one(struct pci_dev *pdev,
2980 const struct pci_device_id *ent);
2983 static struct pci_driver mv_pci_driver = {
2985 .id_table = mv_pci_tbl,
2986 .probe = mv_pci_init_one,
2987 .remove = ata_pci_remove_one,
2993 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2996 /* move to PCI layer or libata core? */
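/* Try a 64-bit DMA mask first, falling back to a 32-bit consistent mask
* and finally to fully 32-bit DMA when the device or platform cannot
* support the larger masks.
*/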
2997 static int pci_go_64(struct pci_dev *pdev)
3001 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3002 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3004 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3006 dev_printk(KERN_ERR, &pdev->dev,
3007 "64-bit DMA enable failed\n");
3012 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3014 dev_printk(KERN_ERR, &pdev->dev,
3015 "32-bit DMA enable failed\n");
3018 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3020 dev_printk(KERN_ERR, &pdev->dev,
3021 "32-bit consistent DMA enable failed\n");
3030 * mv_print_info - Dump key info to kernel log for perusal.
3031 * @host: ATA host to print info about
3033 * FIXME: complete this.
3036 * Inherited from caller.
3038 static void mv_print_info(struct ata_host *host)
3040 struct pci_dev *pdev = to_pci_dev(host->dev);
3041 struct mv_host_priv *hpriv = host->private_data;
3043 const char *scc_s, *gen;
3045 /* Use this to determine the HW stepping of the chip so we know
3046 * what errata to work around
3048 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3051 else if (scc == 0x01)
3056 if (IS_GEN_I(hpriv))
3058 else if (IS_GEN_II(hpriv))
3060 else if (IS_GEN_IIE(hpriv))
3065 dev_printk(KERN_INFO, &pdev->dev,
3066 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3067 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
3068 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3071 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
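/* Managed (devres) DMA pools for the command request queue, command
* response queue and SG tables; dmam_pool_create() ties their lifetime
* to the device, so no explicit destroy path is needed on teardown.
*/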
3073 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3075 if (!hpriv->crqb_pool)
3078 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
3080 if (!hpriv->crpb_pool)
3083 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
3085 if (!hpriv->sg_tbl_pool)
3092 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
3093 * @pdev: PCI device found
3094 * @ent: PCI device ID entry for the matched host
3097 * Inherited from caller.
3099 static int mv_pci_init_one(struct pci_dev *pdev,
3100 const struct pci_device_id *ent)
3102 static int printed_version;
3103 unsigned int board_idx = (unsigned int)ent->driver_data;
3104 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3105 struct ata_host *host;
3106 struct mv_host_priv *hpriv;
3109 if (!printed_version++)
3110 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3113 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3115 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3116 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3117 if (!host || !hpriv)
3119 host->private_data = hpriv;
3120 hpriv->n_ports = n_ports;
3122 /* acquire resources */
3123 rc = pcim_enable_device(pdev);
3127 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3129 pcim_pin_device(pdev);
3132 host->iomap = pcim_iomap_table(pdev);
3133 hpriv->base = host->iomap[MV_PRIMARY_BAR];
3135 rc = pci_go_64(pdev);
3139 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3143 /* initialize adapter */
3144 rc = mv_init_host(host, board_idx);
3148 /* Enable interrupts */
3149 if (msi && pci_enable_msi(pdev))
3152 mv_dump_pci_cfg(pdev, 0x68);
3153 mv_print_info(host);
3155 pci_set_master(pdev);
3156 pci_try_set_mwi(pdev);
3157 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3158 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
3162 static int mv_platform_probe(struct platform_device *pdev);
3163 static int __devexit mv_platform_remove(struct platform_device *pdev);
3165 static int __init mv_init(void)
3169 rc = pci_register_driver(&mv_pci_driver);
3173 rc = platform_driver_register(&mv_platform_driver);
3177 pci_unregister_driver(&mv_pci_driver);
3182 static void __exit mv_exit(void)
3185 pci_unregister_driver(&mv_pci_driver);
3187 platform_driver_unregister(&mv_platform_driver);
3190 MODULE_AUTHOR("Brett Russ");
3191 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3192 MODULE_LICENSE("GPL");
3193 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3194 MODULE_VERSION(DRV_VERSION);
3197 module_param(msi, int, 0444);
3198 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
3201 module_init(mv_init);
3202 module_exit(mv_exit);