2 * sata_mv.c - Marvell SATA support
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
32 2) Improve/fix IRQ and error handling sequences.
34 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36 4) Think about TCQ support here, and for libata in general
37 with controllers that support it via host-queuing hardware
38 (a software-only implementation could be a nightmare).
40 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42 6) Add port multiplier support (intermediate)
44 8) Develop a low-power-consumption strategy, and implement it.
46 9) [Experiment, low priority] See if ATAPI can be supported using
47 "unknown FIS" or "vendor-specific FIS" support, or something creative
50 10) [Experiment, low priority] Investigate interrupt coalescing.
51 Quite often, especially with PCI Message Signalled Interrupts (MSI),
52 the overhead reduced by interrupt mitigation is not worth the
53 latency cost.
55 11) [Experiment, Marvell value added] Is it possible to use target
56 mode to cross-connect two Linux boxes with Marvell cards? If so,
57 creating LibATA target mode support would be very interesting.
59 Target mode, for those without docs, is the ability to directly
60 connect two SATA controllers.
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/pci.h>
68 #include <linux/init.h>
69 #include <linux/blkdev.h>
70 #include <linux/delay.h>
71 #include <linux/interrupt.h>
72 #include <linux/dmapool.h>
73 #include <linux/dma-mapping.h>
74 #include <linux/device.h>
75 #include <linux/platform_device.h>
76 #include <linux/ata_platform.h>
77 #include <linux/mbus.h>
78 #include <scsi/scsi_host.h>
79 #include <scsi/scsi_cmnd.h>
80 #include <scsi/scsi_device.h>
81 #include <linux/libata.h>
83 #define DRV_NAME "sata_mv"
84 #define DRV_VERSION "1.20"
87 /* BARs are enumerated in pci_resource_start() terms */
88 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
89 MV_IO_BAR = 2, /* offset 0x18: IO space */
90 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
92 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
93 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
96 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
97 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
98 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
99 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
100 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
101 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
103 MV_SATAHC0_REG_BASE = 0x20000,
104 MV_FLASH_CTL = 0x1046c,
105 MV_GPIO_PORT_CTL = 0x104f0,
106 MV_RESET_CFG = 0x180d8,
108 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
110 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
111 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
114 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
116 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
117 * CRPB needs alignment on a 256B boundary. Size == 256B
118 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
120 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
121 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
123 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
126 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
127 MV_PORT_HC_SHIFT = 2,
128 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
132 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
133 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
134 /* SoC integrated controllers, no PCI interface */
135 MV_FLAG_SOC = (1 << 28),
137 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
138 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
139 ATA_FLAG_PIO_POLLING,
140 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
142 CRQB_FLAG_READ = (1 << 0),
144 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
145 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
146 CRQB_CMD_ADDR_SHIFT = 8,
147 CRQB_CMD_CS = (0x2 << 11),
148 CRQB_CMD_LAST = (1 << 15),
150 CRPB_FLAG_STATUS_SHIFT = 8,
151 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
152 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
154 EPRD_FLAG_END_OF_TBL = (1 << 31),
156 /* PCI interface registers */
158 PCI_COMMAND_OFS = 0xc00,
160 PCI_MAIN_CMD_STS_OFS = 0xd30,
161 STOP_PCI_MASTER = (1 << 2),
162 PCI_MASTER_EMPTY = (1 << 3),
163 GLOB_SFT_RST = (1 << 4),
166 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
167 MV_PCI_DISC_TIMER = 0xd04,
168 MV_PCI_MSI_TRIGGER = 0xc38,
169 MV_PCI_SERR_MASK = 0xc28,
170 MV_PCI_XBAR_TMOUT = 0x1d04,
171 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
172 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
173 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
174 MV_PCI_ERR_COMMAND = 0x1d50,
176 PCI_IRQ_CAUSE_OFS = 0x1d58,
177 PCI_IRQ_MASK_OFS = 0x1d5c,
178 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
180 PCIE_IRQ_CAUSE_OFS = 0x1900,
181 PCIE_IRQ_MASK_OFS = 0x1910,
182 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
184 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
185 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
186 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
187 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
188 PORT0_ERR = (1 << 0), /* shift by port # */
189 PORT0_DONE = (1 << 1), /* shift by port # */
190 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
191 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
193 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
194 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
195 PORTS_0_3_COAL_DONE = (1 << 8),
196 PORTS_4_7_COAL_DONE = (1 << 17),
197 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
198 GPIO_INT = (1 << 22),
199 SELF_INT = (1 << 23),
200 TWSI_INT = (1 << 24),
201 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
202 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
203 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
204 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
205 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
207 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
209 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
211 /* SATAHC registers */
214 HC_IRQ_CAUSE_OFS = 0x14,
215 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
216 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
217 DEV_IRQ = (1 << 8), /* shift by port # */
219 /* Shadow block registers */
221 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
224 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
225 SATA_ACTIVE_OFS = 0x350,
226 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
233 SATA_INTERFACE_CTL = 0x050,
235 MV_M2_PREAMP_MASK = 0x7e0,
239 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
240 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
241 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
242 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
243 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
245 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
246 EDMA_ERR_IRQ_MASK_OFS = 0xc,
247 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
248 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
249 EDMA_ERR_DEV = (1 << 2), /* device error */
250 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
251 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
252 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
253 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
254 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
255 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
256 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
257 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
258 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
259 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
260 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
262 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
263 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
264 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
265 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
266 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
268 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
270 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
271 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
272 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
273 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
274 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
275 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
277 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
279 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
280 EDMA_ERR_OVERRUN_5 = (1 << 5),
281 EDMA_ERR_UNDERRUN_5 = (1 << 6),
283 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
284 EDMA_ERR_LNK_CTRL_RX_1 |
285 EDMA_ERR_LNK_CTRL_RX_3 |
286 EDMA_ERR_LNK_CTRL_TX,
288 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
298 EDMA_ERR_LNK_CTRL_RX_2 |
299 EDMA_ERR_LNK_DATA_RX |
300 EDMA_ERR_LNK_DATA_TX |
301 EDMA_ERR_TRANS_PROTO,
302 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
307 EDMA_ERR_UNDERRUN_5 |
308 EDMA_ERR_SELF_DIS_5 |
314 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
315 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
317 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
318 EDMA_REQ_Q_PTR_SHIFT = 5,
320 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
321 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
322 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
323 EDMA_RSP_Q_PTR_SHIFT = 3,
325 EDMA_CMD_OFS = 0x28, /* EDMA command register */
326 EDMA_EN = (1 << 0), /* enable EDMA */
327 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
328 ATA_RST = (1 << 2), /* reset trans/link/phy */
330 EDMA_IORDY_TMOUT = 0x34,
333 /* Host private flags (hp_flags) */
334 MV_HP_FLAG_MSI = (1 << 0),
335 MV_HP_ERRATA_50XXB0 = (1 << 1),
336 MV_HP_ERRATA_50XXB2 = (1 << 2),
337 MV_HP_ERRATA_60X1B2 = (1 << 3),
338 MV_HP_ERRATA_60X1C0 = (1 << 4),
339 MV_HP_ERRATA_XX42A0 = (1 << 5),
340 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
341 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
342 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
343 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
345 /* Port private flags (pp_flags) */
346 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
347 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
348 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
351 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
352 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
353 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
354 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
356 #define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
357 #define WINDOW_BASE(i) (0x20034 + ((i) << 4))
360 /* DMA boundary 0xffff is required by the s/g splitting
361 * we need on /length/ in mv_fill_sg().
363 MV_DMA_BOUNDARY = 0xffffU,
365 /* mask of register bits containing lower 32 bits
366 * of EDMA request queue DMA address
368 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
370 /* ditto, for response queue */
371 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
385 /* Command ReQuest Block: 32B */
401 /* Command ResPonse Block: 8B */
408 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
416 struct mv_port_priv {
417 struct mv_crqb *crqb;
419 struct mv_crpb *crpb;
421 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
422 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
424 unsigned int req_idx;
425 unsigned int resp_idx;
430 struct mv_port_signal {
435 struct mv_host_priv {
437 struct mv_port_signal signal[8];
438 const struct mv_hw_ops *ops;
441 void __iomem *main_cause_reg_addr;
442 void __iomem *main_mask_reg_addr;
447 * These consistent DMA memory pools give us guaranteed
448 * alignment for hardware-accessed data structures,
449 * and less memory waste in accomplishing the alignment.
451 struct dma_pool *crqb_pool;
452 struct dma_pool *crpb_pool;
453 struct dma_pool *sg_tbl_pool;
457 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
459 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
460 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
462 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
464 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
465 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
468 static void mv_irq_clear(struct ata_port *ap);
469 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
470 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
471 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
472 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
473 static int mv_port_start(struct ata_port *ap);
474 static void mv_port_stop(struct ata_port *ap);
475 static void mv_qc_prep(struct ata_queued_cmd *qc);
476 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
477 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
478 static void mv_error_handler(struct ata_port *ap);
479 static void mv_eh_freeze(struct ata_port *ap);
480 static void mv_eh_thaw(struct ata_port *ap);
481 static void mv6_dev_config(struct ata_device *dev);
483 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
485 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
486 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
488 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
490 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
491 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
493 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
495 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
496 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
498 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
500 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
501 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
503 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
505 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
506 void __iomem *mmio, unsigned int n_hc);
507 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
509 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
510 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
511 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
512 unsigned int port_no);
513 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
514 void __iomem *port_mmio, int want_ncq);
515 static int __mv_stop_dma(struct ata_port *ap);
517 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
518 * because we have to allow room for worst case splitting of
519 * PRDs for 64K boundaries in mv_fill_sg().
521 static struct scsi_host_template mv5_sht = {
522 .module = THIS_MODULE,
524 .ioctl = ata_scsi_ioctl,
525 .queuecommand = ata_scsi_queuecmd,
526 .can_queue = ATA_DEF_QUEUE,
527 .this_id = ATA_SHT_THIS_ID,
528 .sg_tablesize = MV_MAX_SG_CT / 2,
529 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
530 .emulated = ATA_SHT_EMULATED,
532 .proc_name = DRV_NAME,
533 .dma_boundary = MV_DMA_BOUNDARY,
534 .slave_configure = ata_scsi_slave_config,
535 .slave_destroy = ata_scsi_slave_destroy,
536 .bios_param = ata_std_bios_param,
539 static struct scsi_host_template mv6_sht = {
540 .module = THIS_MODULE,
542 .ioctl = ata_scsi_ioctl,
543 .queuecommand = ata_scsi_queuecmd,
544 .change_queue_depth = ata_scsi_change_queue_depth,
545 .can_queue = MV_MAX_Q_DEPTH - 1,
546 .this_id = ATA_SHT_THIS_ID,
547 .sg_tablesize = MV_MAX_SG_CT / 2,
548 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
549 .emulated = ATA_SHT_EMULATED,
551 .proc_name = DRV_NAME,
552 .dma_boundary = MV_DMA_BOUNDARY,
553 .slave_configure = ata_scsi_slave_config,
554 .slave_destroy = ata_scsi_slave_destroy,
555 .bios_param = ata_std_bios_param,
558 static const struct ata_port_operations mv5_ops = {
559 .tf_load = ata_tf_load,
560 .tf_read = ata_tf_read,
561 .check_status = ata_check_status,
562 .exec_command = ata_exec_command,
563 .dev_select = ata_std_dev_select,
565 .cable_detect = ata_cable_sata,
567 .qc_prep = mv_qc_prep,
568 .qc_issue = mv_qc_issue,
569 .data_xfer = ata_data_xfer,
571 .irq_clear = mv_irq_clear,
572 .irq_on = ata_irq_on,
574 .error_handler = mv_error_handler,
575 .freeze = mv_eh_freeze,
578 .scr_read = mv5_scr_read,
579 .scr_write = mv5_scr_write,
581 .port_start = mv_port_start,
582 .port_stop = mv_port_stop,
585 static const struct ata_port_operations mv6_ops = {
586 .dev_config = mv6_dev_config,
587 .tf_load = ata_tf_load,
588 .tf_read = ata_tf_read,
589 .check_status = ata_check_status,
590 .exec_command = ata_exec_command,
591 .dev_select = ata_std_dev_select,
593 .cable_detect = ata_cable_sata,
595 .qc_prep = mv_qc_prep,
596 .qc_issue = mv_qc_issue,
597 .data_xfer = ata_data_xfer,
599 .irq_clear = mv_irq_clear,
600 .irq_on = ata_irq_on,
602 .error_handler = mv_error_handler,
603 .freeze = mv_eh_freeze,
605 .qc_defer = ata_std_qc_defer,
607 .scr_read = mv_scr_read,
608 .scr_write = mv_scr_write,
610 .port_start = mv_port_start,
611 .port_stop = mv_port_stop,
614 static const struct ata_port_operations mv_iie_ops = {
615 .tf_load = ata_tf_load,
616 .tf_read = ata_tf_read,
617 .check_status = ata_check_status,
618 .exec_command = ata_exec_command,
619 .dev_select = ata_std_dev_select,
621 .cable_detect = ata_cable_sata,
623 .qc_prep = mv_qc_prep_iie,
624 .qc_issue = mv_qc_issue,
625 .data_xfer = ata_data_xfer,
627 .irq_clear = mv_irq_clear,
628 .irq_on = ata_irq_on,
630 .error_handler = mv_error_handler,
631 .freeze = mv_eh_freeze,
633 .qc_defer = ata_std_qc_defer,
635 .scr_read = mv_scr_read,
636 .scr_write = mv_scr_write,
638 .port_start = mv_port_start,
639 .port_stop = mv_port_stop,
642 static const struct ata_port_info mv_port_info[] = {
644 .flags = MV_COMMON_FLAGS,
645 .pio_mask = 0x1f, /* pio0-4 */
646 .udma_mask = ATA_UDMA6,
647 .port_ops = &mv5_ops,
650 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
651 .pio_mask = 0x1f, /* pio0-4 */
652 .udma_mask = ATA_UDMA6,
653 .port_ops = &mv5_ops,
656 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
657 .pio_mask = 0x1f, /* pio0-4 */
658 .udma_mask = ATA_UDMA6,
659 .port_ops = &mv5_ops,
662 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
664 .pio_mask = 0x1f, /* pio0-4 */
665 .udma_mask = ATA_UDMA6,
666 .port_ops = &mv6_ops,
669 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
670 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
671 .pio_mask = 0x1f, /* pio0-4 */
672 .udma_mask = ATA_UDMA6,
673 .port_ops = &mv6_ops,
676 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
678 .pio_mask = 0x1f, /* pio0-4 */
679 .udma_mask = ATA_UDMA6,
680 .port_ops = &mv_iie_ops,
683 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
685 .pio_mask = 0x1f, /* pio0-4 */
686 .udma_mask = ATA_UDMA6,
687 .port_ops = &mv_iie_ops,
690 .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
691 .pio_mask = 0x1f, /* pio0-4 */
692 .udma_mask = ATA_UDMA6,
693 .port_ops = &mv_iie_ops,
697 static const struct pci_device_id mv_pci_tbl[] = {
698 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
699 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
700 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
701 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
702 /* RocketRAID 1740/174x have different identifiers */
703 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
704 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
706 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
707 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
708 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
709 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
710 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
712 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
715 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
717 /* Marvell 7042 support */
718 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
720 /* Highpoint RocketRAID PCIe series */
721 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
722 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
724 { } /* terminate list */
727 static const struct mv_hw_ops mv5xxx_ops = {
728 .phy_errata = mv5_phy_errata,
729 .enable_leds = mv5_enable_leds,
730 .read_preamp = mv5_read_preamp,
731 .reset_hc = mv5_reset_hc,
732 .reset_flash = mv5_reset_flash,
733 .reset_bus = mv5_reset_bus,
736 static const struct mv_hw_ops mv6xxx_ops = {
737 .phy_errata = mv6_phy_errata,
738 .enable_leds = mv6_enable_leds,
739 .read_preamp = mv6_read_preamp,
740 .reset_hc = mv6_reset_hc,
741 .reset_flash = mv6_reset_flash,
742 .reset_bus = mv_reset_pci_bus,
745 static const struct mv_hw_ops mv_soc_ops = {
746 .phy_errata = mv6_phy_errata,
747 .enable_leds = mv_soc_enable_leds,
748 .read_preamp = mv_soc_read_preamp,
749 .reset_hc = mv_soc_reset_hc,
750 .reset_flash = mv_soc_reset_flash,
751 .reset_bus = mv_soc_reset_bus,
758 static inline void writelfl(unsigned long data, void __iomem *addr)
761 (void) readl(addr); /* flush to avoid PCI posted write */
764 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
766 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
769 static inline unsigned int mv_hc_from_port(unsigned int port)
771 return port >> MV_PORT_HC_SHIFT;
774 static inline unsigned int mv_hardport_from_port(unsigned int port)
776 return port & MV_PORT_MASK;
779 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
782 return mv_hc_base(base, mv_hc_from_port(port));
785 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
787 return mv_hc_base_from_port(base, port) +
788 MV_SATAHC_ARBTR_REG_SZ +
789 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
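/* For illustration (not in the original source), assuming MV_PORTS_PER_HC
 * is 4 (MV_PORT_HC_SHIFT == 2, MV_PORT_MASK == 3): port 5 resolves to
 * HC 1, hard port 1, so with the register sizes defined earlier:
 *
 *   mv_port_base(base, 5) = base + MV_SATAHC0_REG_BASE   (0x20000)
 *                                + 1 * MV_SATAHC_REG_SZ  (0x10000)
 *                                + MV_SATAHC_ARBTR_REG_SZ (0x2000)
 *                                + 1 * MV_PORT_REG_SZ     (0x2000)
 *                         = base + 0x34000
 */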
792 static inline void __iomem *mv_host_base(struct ata_host *host)
794 struct mv_host_priv *hpriv = host->private_data;
798 static inline void __iomem *mv_ap_base(struct ata_port *ap)
800 return mv_port_base(mv_host_base(ap->host), ap->port_no);
803 static inline int mv_get_hc_count(unsigned long port_flags)
805 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
808 static void mv_irq_clear(struct ata_port *ap)
812 static void mv_set_edma_ptrs(void __iomem *port_mmio,
813 struct mv_host_priv *hpriv,
814 struct mv_port_priv *pp)
819 * initialize request queue
821 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
823 WARN_ON(pp->crqb_dma & 0x3ff);
824 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
825 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
826 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
828 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
829 writelfl((pp->crqb_dma & 0xffffffff) | index,
830 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
832 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
835 * initialize response queue
837 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
839 WARN_ON(pp->crpb_dma & 0xff);
840 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
842 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
843 writelfl((pp->crpb_dma & 0xffffffff) | index,
844 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
846 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
848 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
849 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
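/* Worked example (illustration only, assuming MV_MAX_Q_DEPTH == 32, i.e. a
 * 5-bit ring index): with EDMA_REQ_Q_PTR_SHIFT == 5 the request ring index
 * occupies bits 9:5 of the IN/OUT pointer registers, while
 * EDMA_REQ_Q_BASE_LO_MASK (0xfffffc00) keeps bits 31:10 of the queue's DMA
 * address -- hence the WARN_ON(pp->crqb_dma & 0x3ff) above, which enforces
 * the 1KB alignment that keeps the two fields from overlapping.  E.g.
 * req_idx == 3 on a queue at DMA address 0x1f000400 packs to
 * (0x1f000400 & 0xfffffc00) | (3 << 5) == 0x1f000460.
 */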
853 * mv_start_dma - Enable eDMA engine
854 * @base: port base address
855 * @pp: port private data
857 * Verify the local cache of the eDMA state is accurate with a WARN_ON.
861 * Inherited from caller.
863 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
864 struct mv_port_priv *pp, u8 protocol)
866 int want_ncq = (protocol == ATA_PROT_NCQ);
868 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
869 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
870 if (want_ncq != using_ncq)
873 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
874 struct mv_host_priv *hpriv = ap->host->private_data;
875 int hard_port = mv_hardport_from_port(ap->port_no);
876 void __iomem *hc_mmio = mv_hc_base_from_port(
877 mv_host_base(ap->host), hard_port);
878 u32 hc_irq_cause, ipending;
880 /* clear EDMA event indicators, if any */
881 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
883 /* clear EDMA interrupt indicator, if any */
884 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
885 ipending = (DEV_IRQ << hard_port) |
886 (CRPB_DMA_DONE << hard_port);
887 if (hc_irq_cause & ipending) {
888 writelfl(hc_irq_cause & ~ipending,
889 hc_mmio + HC_IRQ_CAUSE_OFS);
892 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
894 /* clear FIS IRQ Cause */
895 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
897 mv_set_edma_ptrs(port_mmio, hpriv, pp);
899 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
900 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
902 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
906 * __mv_stop_dma - Disable eDMA engine
907 * @ap: ATA channel to manipulate
909 * Verify the local cache of the eDMA state is accurate with a WARN_ON.
913 * Inherited from caller.
915 static int __mv_stop_dma(struct ata_port *ap)
917 void __iomem *port_mmio = mv_ap_base(ap);
918 struct mv_port_priv *pp = ap->private_data;
922 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
923 /* Disable EDMA if active. The disable bit auto clears.
925 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
926 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
928 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
931 /* now properly wait for the eDMA to stop */
932 for (i = 1000; i > 0; i--) {
933 reg = readl(port_mmio + EDMA_CMD_OFS);
934 if (!(reg & EDMA_EN))
941 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
948 static int mv_stop_dma(struct ata_port *ap)
953 spin_lock_irqsave(&ap->host->lock, flags);
954 rc = __mv_stop_dma(ap);
955 spin_unlock_irqrestore(&ap->host->lock, flags);
961 static void mv_dump_mem(void __iomem *start, unsigned bytes)
964 for (b = 0; b < bytes; ) {
965 DPRINTK("%p: ", start + b);
966 for (w = 0; b < bytes && w < 4; w++) {
967 printk("%08x ", readl(start + b));
975 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
980 for (b = 0; b < bytes; ) {
981 DPRINTK("%02x: ", b);
982 for (w = 0; b < bytes && w < 4; w++) {
983 (void) pci_read_config_dword(pdev, b, &dw);
991 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
992 struct pci_dev *pdev)
995 void __iomem *hc_base = mv_hc_base(mmio_base,
996 port >> MV_PORT_HC_SHIFT);
997 void __iomem *port_base;
998 int start_port, num_ports, p, start_hc, num_hcs, hc;
1001 start_hc = start_port = 0;
1002 num_ports = 8; /* should be benign for 4-port devices */
1005 start_hc = port >> MV_PORT_HC_SHIFT;
1007 num_ports = num_hcs = 1;
1009 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
1010 num_ports > 1 ? num_ports - 1 : start_port);
1013 DPRINTK("PCI config space regs:\n");
1014 mv_dump_pci_cfg(pdev, 0x68);
1016 DPRINTK("PCI regs:\n");
1017 mv_dump_mem(mmio_base+0xc00, 0x3c);
1018 mv_dump_mem(mmio_base+0xd00, 0x34);
1019 mv_dump_mem(mmio_base+0xf00, 0x4);
1020 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1021 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1022 hc_base = mv_hc_base(mmio_base, hc);
1023 DPRINTK("HC regs (HC %i):\n", hc);
1024 mv_dump_mem(hc_base, 0x1c);
1026 for (p = start_port; p < start_port + num_ports; p++) {
1027 port_base = mv_port_base(mmio_base, p);
1028 DPRINTK("EDMA regs (port %i):\n", p);
1029 mv_dump_mem(port_base, 0x54);
1030 DPRINTK("SATA regs (port %i):\n", p);
1031 mv_dump_mem(port_base+0x300, 0x60);
1036 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1040 switch (sc_reg_in) {
1044 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1047 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1056 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1058 unsigned int ofs = mv_scr_offset(sc_reg_in);
1060 if (ofs != 0xffffffffU) {
1061 *val = readl(mv_ap_base(ap) + ofs);
1067 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1069 unsigned int ofs = mv_scr_offset(sc_reg_in);
1071 if (ofs != 0xffffffffU) {
1072 writelfl(val, mv_ap_base(ap) + ofs);
1078 static void mv6_dev_config(struct ata_device *adev)
1081 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1082 * See mv_qc_prep() for more info.
1084 if (adev->flags & ATA_DFLAG_NCQ)
1085 if (adev->max_sectors > ATA_MAX_SECTORS)
1086 adev->max_sectors = ATA_MAX_SECTORS;
1089 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1090 void __iomem *port_mmio, int want_ncq)
1094 /* set up non-NCQ EDMA configuration */
1095 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1097 if (IS_GEN_I(hpriv))
1098 cfg |= (1 << 8); /* enab config burst size mask */
1100 else if (IS_GEN_II(hpriv))
1101 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1103 else if (IS_GEN_IIE(hpriv)) {
1104 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1105 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1106 cfg |= (1 << 18); /* enab early completion */
1107 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1111 cfg |= EDMA_CFG_NCQ;
1112 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1114 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1116 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
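/* Worked example (illustration only): for a Gen-IIE chip with want_ncq set,
 * the value written above comes out to EDMA_CFG_Q_DEPTH (0x1f) | (1 << 23) |
 * (1 << 22) | (1 << 18) | (1 << 17) | EDMA_CFG_NCQ (1 << 5) == 0x00c6003f.
 */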
1119 static void mv_port_free_dma_mem(struct ata_port *ap)
1121 struct mv_host_priv *hpriv = ap->host->private_data;
1122 struct mv_port_priv *pp = ap->private_data;
1126 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1130 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1134 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1135 * For later hardware, we have one unique sg_tbl per NCQ tag.
1137 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1138 if (pp->sg_tbl[tag]) {
1139 if (tag == 0 || !IS_GEN_I(hpriv))
1140 dma_pool_free(hpriv->sg_tbl_pool,
1142 pp->sg_tbl_dma[tag]);
1143 pp->sg_tbl[tag] = NULL;
1149 * mv_port_start - Port specific init/start routine.
1150 * @ap: ATA channel to manipulate
1152 * Allocate and point to DMA memory, init port private memory,
1156 * Inherited from caller.
1158 static int mv_port_start(struct ata_port *ap)
1160 struct device *dev = ap->host->dev;
1161 struct mv_host_priv *hpriv = ap->host->private_data;
1162 struct mv_port_priv *pp;
1163 void __iomem *port_mmio = mv_ap_base(ap);
1164 unsigned long flags;
1167 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1170 ap->private_data = pp;
1172 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1175 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1177 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1179 goto out_port_free_dma_mem;
1180 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1183 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1184 * For later hardware, we need one unique sg_tbl per NCQ tag.
1186 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1187 if (tag == 0 || !IS_GEN_I(hpriv)) {
1188 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1189 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1190 if (!pp->sg_tbl[tag])
1191 goto out_port_free_dma_mem;
1193 pp->sg_tbl[tag] = pp->sg_tbl[0];
1194 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1198 spin_lock_irqsave(&ap->host->lock, flags);
1200 mv_edma_cfg(pp, hpriv, port_mmio, 0);
1201 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1203 spin_unlock_irqrestore(&ap->host->lock, flags);
1205 /* Don't turn on EDMA here...do it before DMA commands only. Else
1206 * we'll be unable to send non-data, PIO, etc due to restricted access
* to shadow regs.
1211 out_port_free_dma_mem:
1212 mv_port_free_dma_mem(ap);
1217 * mv_port_stop - Port specific cleanup/stop routine.
1218 * @ap: ATA channel to manipulate
1220 * Stop DMA, cleanup port memory.
1223 * This routine uses the host lock to protect the DMA stop.
1225 static void mv_port_stop(struct ata_port *ap)
1228 mv_port_free_dma_mem(ap);
1232 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1233 * @qc: queued command whose SG list to source from
1235 * Populate the SG list and mark the last entry.
1238 * Inherited from caller.
1240 static void mv_fill_sg(struct ata_queued_cmd *qc)
1242 struct mv_port_priv *pp = qc->ap->private_data;
1243 struct scatterlist *sg;
1244 struct mv_sg *mv_sg, *last_sg = NULL;
1247 mv_sg = pp->sg_tbl[qc->tag];
1248 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1249 dma_addr_t addr = sg_dma_address(sg);
1250 u32 sg_len = sg_dma_len(sg);
1253 u32 offset = addr & 0xffff;
1256 if ((offset + sg_len > 0x10000))
1257 len = 0x10000 - offset;
1259 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1260 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1261 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1271 if (likely(last_sg))
1272 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
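/* Worked example (illustration only): a 0x300-byte segment at bus address
 * 0x1ff00 crosses a 64KB boundary.  Its offset within the 64KB page is
 * 0xff00, so the first ePRD is clamped to 0x10000 - 0xff00 == 0x100 bytes
 * at 0x1ff00, leaving 0x200 bytes at 0x20000 for a second ePRD on the next
 * pass.  This worst case -- every PRD potentially split in two -- is why
 * .sg_tablesize is MV_MAX_SG_CT / 2 in the host templates earlier in the
 * file.
 */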
1275 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1277 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1278 (last ? CRQB_CMD_LAST : 0);
1279 *cmdw = cpu_to_le16(tmp);
1283 * mv_qc_prep - Host specific command preparation.
1284 * @qc: queued command to prepare
1286 * This routine simply redirects to the general purpose routine
1287 * if command is not DMA. Else, it handles prep of the CRQB
1288 * (command request block), does some sanity checking, and calls
1289 * the SG load routine.
1292 * Inherited from caller.
1294 static void mv_qc_prep(struct ata_queued_cmd *qc)
1296 struct ata_port *ap = qc->ap;
1297 struct mv_port_priv *pp = ap->private_data;
1299 struct ata_taskfile *tf;
1303 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1304 (qc->tf.protocol != ATA_PROT_NCQ))
1307 /* Fill in command request block
1309 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1310 flags |= CRQB_FLAG_READ;
1311 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1312 flags |= qc->tag << CRQB_TAG_SHIFT;
1314 /* get current queue index from software */
1315 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1317 pp->crqb[in_index].sg_addr =
1318 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1319 pp->crqb[in_index].sg_addr_hi =
1320 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1321 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1323 cw = &pp->crqb[in_index].ata_cmd[0];
1326 /* Sadly, the CRQB cannot accommodate all registers--there are
1327 * only 11 bytes...so we must pick and choose required
1328 * registers based on the command. So, we drop feature and
1329 * hob_feature for [RW] DMA commands, but they are needed for
1330 * NCQ. NCQ will drop hob_nsect.
1332 switch (tf->command) {
1334 case ATA_CMD_READ_EXT:
1336 case ATA_CMD_WRITE_EXT:
1337 case ATA_CMD_WRITE_FUA_EXT:
1338 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1340 case ATA_CMD_FPDMA_READ:
1341 case ATA_CMD_FPDMA_WRITE:
1342 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1343 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1346 /* The only other commands EDMA supports in non-queued and
1347 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1348 * of which are defined/used by Linux. If we get here, this
1349 * driver needs work.
1351 * FIXME: modify libata to give qc_prep a return value and
1352 * return error here.
1354 BUG_ON(tf->command);
1357 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1358 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1359 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1360 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1361 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1362 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1363 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1364 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1365 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1367 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1373 * mv_qc_prep_iie - Host specific command preparation.
1374 * @qc: queued command to prepare
1376 * This routine simply redirects to the general purpose routine
1377 * if command is not DMA. Else, it handles prep of the CRQB
1378 * (command request block), does some sanity checking, and calls
1379 * the SG load routine.
1382 * Inherited from caller.
1384 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1386 struct ata_port *ap = qc->ap;
1387 struct mv_port_priv *pp = ap->private_data;
1388 struct mv_crqb_iie *crqb;
1389 struct ata_taskfile *tf;
1393 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1394 (qc->tf.protocol != ATA_PROT_NCQ))
1397 /* Fill in Gen IIE command request block
1399 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1400 flags |= CRQB_FLAG_READ;
1402 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1403 flags |= qc->tag << CRQB_TAG_SHIFT;
1404 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1406 /* get current queue index from software */
1407 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1409 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1410 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1411 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1412 crqb->flags = cpu_to_le32(flags);
1415 crqb->ata_cmd[0] = cpu_to_le32(
1416 (tf->command << 16) |
1419 crqb->ata_cmd[1] = cpu_to_le32(
1425 crqb->ata_cmd[2] = cpu_to_le32(
1426 (tf->hob_lbal << 0) |
1427 (tf->hob_lbam << 8) |
1428 (tf->hob_lbah << 16) |
1429 (tf->hob_feature << 24)
1431 crqb->ata_cmd[3] = cpu_to_le32(
1433 (tf->hob_nsect << 8)
1436 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1442 * mv_qc_issue - Initiate a command to the host
1443 * @qc: queued command to start
1445 * This routine simply redirects to the general purpose routine
1446 * if command is not DMA. Else, it sanity checks our local
1447 * caches of the request producer/consumer indices then enables
1448 * DMA and bumps the request producer index.
1451 * Inherited from caller.
1453 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1455 struct ata_port *ap = qc->ap;
1456 void __iomem *port_mmio = mv_ap_base(ap);
1457 struct mv_port_priv *pp = ap->private_data;
1460 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1461 (qc->tf.protocol != ATA_PROT_NCQ)) {
1462 /* We're about to send a non-EDMA capable command to the
1463 * port. Turn off EDMA so there won't be problems accessing
1464 * shadow block, etc registers.
1467 return ata_qc_issue_prot(qc);
1470 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1474 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1476 /* and write the request in pointer to kick the EDMA to life */
1477 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1478 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1484 * mv_err_intr - Handle error interrupts on the port
1485 * @ap: ATA channel to manipulate
1486 * @qc: affected command, if any
1488 * In most cases, just clear the interrupt and move on. However,
1489 * some cases require an eDMA reset, which is done right before
1490 * the COMRESET in mv_phy_reset(). The SERR case requires a
1491 * clear of pending errors in the SATA SERROR register. Finally,
1492 * if the port disabled DMA, update our cached copy to match.
1495 * Inherited from caller.
1497 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1499 void __iomem *port_mmio = mv_ap_base(ap);
1500 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1501 struct mv_port_priv *pp = ap->private_data;
1502 struct mv_host_priv *hpriv = ap->host->private_data;
1503 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1504 unsigned int action = 0, err_mask = 0;
1505 struct ata_eh_info *ehi = &ap->link.eh_info;
1507 ata_ehi_clear_desc(ehi);
1509 if (!edma_enabled) {
1510 /* just a guess: do we need to do this? should we
1511 * expand this, and do it in all cases?
1513 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1514 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1517 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1519 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1522 * all generations share these EDMA error cause bits
1525 if (edma_err_cause & EDMA_ERR_DEV)
1526 err_mask |= AC_ERR_DEV;
1527 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1528 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1529 EDMA_ERR_INTRL_PAR)) {
1530 err_mask |= AC_ERR_ATA_BUS;
1531 action |= ATA_EH_HARDRESET;
1532 ata_ehi_push_desc(ehi, "parity error");
1534 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1535 ata_ehi_hotplugged(ehi);
1536 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1537 "dev disconnect" : "dev connect");
1538 action |= ATA_EH_HARDRESET;
1541 if (IS_GEN_I(hpriv)) {
1542 eh_freeze_mask = EDMA_EH_FREEZE_5;
1544 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1545 pp = ap->private_data;
1546 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1547 ata_ehi_push_desc(ehi, "EDMA self-disable");
1550 eh_freeze_mask = EDMA_EH_FREEZE;
1552 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1553 pp = ap->private_data;
1554 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1555 ata_ehi_push_desc(ehi, "EDMA self-disable");
1558 if (edma_err_cause & EDMA_ERR_SERR) {
1559 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1560 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1561 err_mask = AC_ERR_ATA_BUS;
1562 action |= ATA_EH_HARDRESET;
1566 /* Clear EDMA now that SERR cleanup done */
1567 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1570 err_mask = AC_ERR_OTHER;
1571 action |= ATA_EH_HARDRESET;
1574 ehi->serror |= serr;
1575 ehi->action |= action;
1578 qc->err_mask |= err_mask;
1580 ehi->err_mask |= err_mask;
1582 if (edma_err_cause & eh_freeze_mask)
1583 ata_port_freeze(ap);
1588 static void mv_intr_pio(struct ata_port *ap)
1590 struct ata_queued_cmd *qc;
1593 /* ignore spurious intr if drive still BUSY */
1594 ata_status = readb(ap->ioaddr.status_addr);
1595 if (unlikely(ata_status & ATA_BUSY))
1598 /* get active ATA command */
1599 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1600 if (unlikely(!qc)) /* no active tag */
1602 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1605 /* and finally, complete the ATA command */
1606 qc->err_mask |= ac_err_mask(ata_status);
1607 ata_qc_complete(qc);
1610 static void mv_intr_edma(struct ata_port *ap)
1612 void __iomem *port_mmio = mv_ap_base(ap);
1613 struct mv_host_priv *hpriv = ap->host->private_data;
1614 struct mv_port_priv *pp = ap->private_data;
1615 struct ata_queued_cmd *qc;
1616 u32 out_index, in_index;
1617 bool work_done = false;
1619 /* get h/w response queue pointer */
1620 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1621 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1627 /* get s/w response queue last-read pointer, and compare */
1628 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1629 if (in_index == out_index)
1632 /* 50xx: get active ATA command */
1633 if (IS_GEN_I(hpriv))
1634 tag = ap->link.active_tag;
1636 /* Gen II/IIE: get active ATA command via tag, to enable
1637 * support for queueing. This works transparently for
1638 * queued and non-queued modes.
1641 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1643 qc = ata_qc_from_tag(ap, tag);
1645 /* For non-NCQ mode, the lower 8 bits of status
1646 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1647 * which should be zero if all went well.
1649 status = le16_to_cpu(pp->crpb[out_index].flags);
1650 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1651 mv_err_intr(ap, qc);
1655 /* and finally, complete the ATA command */
1658 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1659 ata_qc_complete(qc);
1662 /* advance software response queue pointer, to
1663 * indicate (after the loop completes) to hardware
1664 * that we have consumed a response queue entry.
1671 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1672 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1673 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1677 * mv_host_intr - Handle all interrupts on the given host controller
1678 * @host: host specific structure
1679 * @relevant: port error bits relevant to this host controller
1680 * @hc: which host controller we're to look at
1682 * Read then write clear the HC interrupt status then walk each
1683 * port connected to the HC and see if it needs servicing. Port
1684 * success ints are reported in the HC interrupt status reg, the
1685 * port error ints are reported in the higher level main
1686 * interrupt status register and thus are passed in via the
1687 * 'relevant' argument.
1690 * Inherited from caller.
1692 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1694 struct mv_host_priv *hpriv = host->private_data;
1695 void __iomem *mmio = hpriv->base;
1696 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1698 int port, port0, last_port;
1703 port0 = MV_PORTS_PER_HC;
1706 last_port = port0 + MV_PORTS_PER_HC;
1708 last_port = port0 + hpriv->n_ports;
1709 /* we'll need the HC success int register in most cases */
1710 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1714 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1716 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1717 hc, relevant, hc_irq_cause);
1719 for (port = port0; port < last_port; port++) {
1720 struct ata_port *ap = host->ports[port];
1721 struct mv_port_priv *pp;
1722 int have_err_bits, hard_port, shift;
1724 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1727 pp = ap->private_data;
1729 shift = port << 1; /* (port * 2) */
1730 if (port >= MV_PORTS_PER_HC) {
1731 shift++; /* skip bit 8 in the HC Main IRQ reg */
1733 have_err_bits = ((PORT0_ERR << shift) & relevant);
1735 if (unlikely(have_err_bits)) {
1736 struct ata_queued_cmd *qc;
1738 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1739 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1742 mv_err_intr(ap, qc);
1746 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1748 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1749 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1752 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1759 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1761 struct mv_host_priv *hpriv = host->private_data;
1762 struct ata_port *ap;
1763 struct ata_queued_cmd *qc;
1764 struct ata_eh_info *ehi;
1765 unsigned int i, err_mask, printed = 0;
1768 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1770 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1773 DPRINTK("All regs @ PCI error\n");
1774 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1776 writelfl(0, mmio + hpriv->irq_cause_ofs);
1778 for (i = 0; i < host->n_ports; i++) {
1779 ap = host->ports[i];
1780 if (!ata_link_offline(&ap->link)) {
1781 ehi = &ap->link.eh_info;
1782 ata_ehi_clear_desc(ehi);
1784 ata_ehi_push_desc(ehi,
1785 "PCI err cause 0x%08x", err_cause);
1786 err_mask = AC_ERR_HOST_BUS;
1787 ehi->action = ATA_EH_HARDRESET;
1788 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1790 qc->err_mask |= err_mask;
1792 ehi->err_mask |= err_mask;
1794 ata_port_freeze(ap);
1800 * mv_interrupt - Main interrupt event handler
1802 * @dev_instance: private data; in this case the host structure
1804 * Read the read only register to determine if any host
1805 * controllers have pending interrupts. If so, call lower level
1806 * routine to handle. Also check for PCI errors which are only
1810 * This routine holds the host lock while processing pending
1813 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1815 struct ata_host *host = dev_instance;
1816 struct mv_host_priv *hpriv = host->private_data;
1817 unsigned int hc, handled = 0, n_hcs;
1818 void __iomem *mmio = hpriv->base;
1819 u32 irq_stat, irq_mask;
1821 spin_lock(&host->lock);
1823 irq_stat = readl(hpriv->main_cause_reg_addr);
1824 irq_mask = readl(hpriv->main_mask_reg_addr);
1826 /* check the cases where we either have nothing pending or have read
1827 * a bogus register value which can indicate HW removal or PCI fault
1829 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1832 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1834 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1835 mv_pci_error(host, mmio);
1837 goto out_unlock; /* skip all other HC irq handling */
1840 for (hc = 0; hc < n_hcs; hc++) {
1841 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1843 mv_host_intr(host, relevant, hc);
1849 spin_unlock(&host->lock);
1851 return IRQ_RETVAL(handled);
1854 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1856 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1857 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1859 return hc_mmio + ofs;
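/* For illustration (not in the original source): on 50xx parts the per-port
 * PHY registers live inside the owning HC's register block, e.g. port 6
 * (HC 1, hard port 2) uses offset (2 + 1) * 0x100 == 0x300 from that HC's
 * base.
 */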
1862 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1866 switch (sc_reg_in) {
1870 ofs = sc_reg_in * sizeof(u32);
1879 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1881 struct mv_host_priv *hpriv = ap->host->private_data;
1882 void __iomem *mmio = hpriv->base;
1883 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1884 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1886 if (ofs != 0xffffffffU) {
1887 *val = readl(addr + ofs);
1893 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1895 struct mv_host_priv *hpriv = ap->host->private_data;
1896 void __iomem *mmio = hpriv->base;
1897 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1898 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1900 if (ofs != 0xffffffffU) {
1901 writelfl(val, addr + ofs);
1907 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1909 struct pci_dev *pdev = to_pci_dev(host->dev);
1912 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1915 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1917 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1920 mv_reset_pci_bus(host, mmio);
1923 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1925 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1928 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1931 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1934 tmp = readl(phy_mmio + MV5_PHY_MODE);
1936 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1937 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1940 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1944 writel(0, mmio + MV_GPIO_PORT_CTL);
1946 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1948 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1950 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1953 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1956 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1957 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1959 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1962 tmp = readl(phy_mmio + MV5_LT_MODE);
1964 writel(tmp, phy_mmio + MV5_LT_MODE);
1966 tmp = readl(phy_mmio + MV5_PHY_CTL);
1969 writel(tmp, phy_mmio + MV5_PHY_CTL);
1972 tmp = readl(phy_mmio + MV5_PHY_MODE);
1974 tmp |= hpriv->signal[port].pre;
1975 tmp |= hpriv->signal[port].amps;
1976 writel(tmp, phy_mmio + MV5_PHY_MODE);
1981 #define ZERO(reg) writel(0, port_mmio + (reg))
1982 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1985 void __iomem *port_mmio = mv_port_base(mmio, port);
1987 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1989 mv_channel_reset(hpriv, mmio, port);
1991 ZERO(0x028); /* command */
1992 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1993 ZERO(0x004); /* timer */
1994 ZERO(0x008); /* irq err cause */
1995 ZERO(0x00c); /* irq err mask */
1996 ZERO(0x010); /* rq bah */
1997 ZERO(0x014); /* rq inp */
1998 ZERO(0x018); /* rq outp */
1999 ZERO(0x01c); /* respq bah */
2000 ZERO(0x024); /* respq outp */
2001 ZERO(0x020); /* respq inp */
2002 ZERO(0x02c); /* test control */
2003 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2007 #define ZERO(reg) writel(0, hc_mmio + (reg))
2008 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2011 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2019 tmp = readl(hc_mmio + 0x20);
2022 writel(tmp, hc_mmio + 0x20);
2026 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2029 unsigned int hc, port;
2031 for (hc = 0; hc < n_hc; hc++) {
2032 for (port = 0; port < MV_PORTS_PER_HC; port++)
2033 mv5_reset_hc_port(hpriv, mmio,
2034 (hc * MV_PORTS_PER_HC) + port);
2036 mv5_reset_one_hc(hpriv, mmio, hc);
2043 #define ZERO(reg) writel(0, mmio + (reg))
2044 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2046 struct mv_host_priv *hpriv = host->private_data;
2049 tmp = readl(mmio + MV_PCI_MODE);
2051 writel(tmp, mmio + MV_PCI_MODE);
2053 ZERO(MV_PCI_DISC_TIMER);
2054 ZERO(MV_PCI_MSI_TRIGGER);
2055 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2056 ZERO(HC_MAIN_IRQ_MASK_OFS);
2057 ZERO(MV_PCI_SERR_MASK);
2058 ZERO(hpriv->irq_cause_ofs);
2059 ZERO(hpriv->irq_mask_ofs);
2060 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2061 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2062 ZERO(MV_PCI_ERR_ATTRIBUTE);
2063 ZERO(MV_PCI_ERR_COMMAND);
2067 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2071 mv5_reset_flash(hpriv, mmio);
2073 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2075 tmp |= (1 << 5) | (1 << 6);
2076 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2080 * mv6_reset_hc - Perform the 6xxx global soft reset
2081 * @mmio: base address of the HBA
2083 * This routine only applies to 6xxx parts.
2086 * Inherited from caller.
2088 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2091 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2095 /* Following procedure defined in PCI "main command and status
2099 writel(t | STOP_PCI_MASTER, reg);
2101 for (i = 0; i < 1000; i++) {
2104 if (PCI_MASTER_EMPTY & t)
2107 if (!(PCI_MASTER_EMPTY & t)) {
2108 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2116 writel(t | GLOB_SFT_RST, reg);
2119 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2121 if (!(GLOB_SFT_RST & t)) {
2122 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2127 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2130 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2133 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2135 if (GLOB_SFT_RST & t) {
2136 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2143 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2146 void __iomem *port_mmio;
2149 tmp = readl(mmio + MV_RESET_CFG);
2150 if ((tmp & (1 << 0)) == 0) {
2151 hpriv->signal[idx].amps = 0x7 << 8;
2152 hpriv->signal[idx].pre = 0x1 << 5;
2156 port_mmio = mv_port_base(mmio, idx);
2157 tmp = readl(port_mmio + PHY_MODE2);
2159 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2160 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2163 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2165 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2168 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2171 void __iomem *port_mmio = mv_port_base(mmio, port);
2173 u32 hp_flags = hpriv->hp_flags;
2175 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2177 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2180 if (fix_phy_mode2) {
2181 m2 = readl(port_mmio + PHY_MODE2);
2184 writel(m2, port_mmio + PHY_MODE2);
2188 m2 = readl(port_mmio + PHY_MODE2);
2189 m2 &= ~((1 << 16) | (1 << 31));
2190 writel(m2, port_mmio + PHY_MODE2);
2195 /* who knows what this magic does */
2196 tmp = readl(port_mmio + PHY_MODE3);
2199 writel(tmp, port_mmio + PHY_MODE3);
2201 if (fix_phy_mode4) {
2204 m4 = readl(port_mmio + PHY_MODE4);
2206 if (hp_flags & MV_HP_ERRATA_60X1B2)
2207 tmp = readl(port_mmio + 0x310);
2209 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2211 writel(m4, port_mmio + PHY_MODE4);
2213 if (hp_flags & MV_HP_ERRATA_60X1B2)
2214 writel(tmp, port_mmio + 0x310);
2217 /* Revert values of pre-emphasis and signal amps to the saved ones */
2218 m2 = readl(port_mmio + PHY_MODE2);
2220 m2 &= ~MV_M2_PREAMP_MASK;
2221 m2 |= hpriv->signal[port].amps;
2222 m2 |= hpriv->signal[port].pre;
2225 /* according to mvSata 3.6.1, some IIE values are fixed */
2226 if (IS_GEN_IIE(hpriv)) {
2231 writel(m2, port_mmio + PHY_MODE2);
2234 /* TODO: use the generic LED interface to configure the SATA Presence */
2235 /* & Activity LEDs on the board */
2236 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
	void __iomem *port_mmio;
	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);
	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
	void __iomem *port_mmio = mv_port_base(mmio, port);
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
	mv_channel_reset(hpriv, mmio, port);
	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
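	/* SoC hosts have a single host controller: reset each port in turn,
	 * then the controller itself
	 */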
	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);
	mv_soc_reset_one_hc(hpriv, mmio);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
	void __iomem *port_mmio = mv_port_base(mmio, port_no);
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	udelay(25);		/* allow reset propagation */
	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	writelfl(0, port_mmio + EDMA_CMD_OFS);
	hpriv->ops->phy_errata(hpriv, mmio, port_no);
	if (IS_GEN_I(hpriv))
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 * Inherited from caller.  This is coded to be safe to call at
 * interrupt level, i.e. it does not sleep.
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
		u32 sstatus, serror, scontrol;
		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	/* Issue COMRESET via SControl */
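	/* SControl 0x301: DET=1 starts the interface re-initialization
	 * sequence (COMRESET) with partial/slumber power management
	 * disabled; writing 0x300 afterwards releases DET again
	 */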
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
	} while (time_before(jiffies, deadline));
	/* work around errata */
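	/* Gen II parts retry the COMRESET unless SStatus reports either
	 * no device (0x0) or a fully established Gen1/Gen2 link
	 * (0x113/0x123)
	 */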
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
		goto comreset_retry;
		u32 sstatus, serror, scontrol;
		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
		if (time_after(jiffies, deadline))
	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
static int mv_prereset(struct ata_link *link, unsigned long deadline)
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_context *ehc = &link->eh_context;
	rc = mv_stop_dma(ap);
		ehc->i.action |= ATA_EH_HARDRESET;
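	/* the first EH pass on a port always escalates to a hardreset */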
	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
	if (ata_link_online(link))
		rc = ata_wait_ready(ap, deadline);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	mv_channel_reset(hpriv, mmio, ap->port_no);
	mv_phy_reset(ap, class, deadline);
static void mv_postreset(struct ata_link *link, unsigned int *classes)
	struct ata_port *ap = link->ap;
	/* print link status */
	sata_print_link_status(link);
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);
	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
static void mv_error_handler(struct ata_port *ap)
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
static void mv_eh_freeze(struct ata_port *ap)
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	/* FIXME: handle coalescing completion events properly */
	shift = ap->port_no * 2;
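	/* each port owns an adjacent err/done bit pair in the main
	 * interrupt mask
	 */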
	mask = 0x3 << shift;
	/* disable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
static void mv_eh_thaw(struct ata_port *ap)
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;
	/* FIXME: handle coalescing completion events properly */
	shift = ap->port_no * 2;
	mask = 0x3 << shift;
	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);		/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8));	/* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 * Inherited from caller.
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	/* PIO related setup
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;
	switch (board_idx) {
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;
		switch (pdev->revision) {
			hp_flags |= MV_HP_ERRATA_50XXB0;
			hp_flags |= MV_HP_ERRATA_50XXB2;
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;
		switch (pdev->revision) {
			hp_flags |= MV_HP_ERRATA_50XXB0;
			hp_flags |= MV_HP_ERRATA_50XXB2;
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;
		switch (pdev->revision) {
			hp_flags |= MV_HP_ERRATA_60X1B2;
			hp_flags |= MV_HP_ERRATA_60X1C0;
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 * Warn the user, lest they think we're just buggy.
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of whether or how they are configured."
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
		switch (pdev->revision) {
			hp_flags |= MV_HP_ERRATA_XX42A0;
			hp_flags |= MV_HP_ERRATA_60X1C0;
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 * Inherited from caller.
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	rc = mv_chip_id(host, board_idx);
	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
			HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
		hpriv->main_cause_reg_addr = hpriv->base +
			HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
			HC_SOC_MAIN_IRQ_MASK_OFS;
	/* global interrupt mask */
	writel(0, hpriv->main_mask_reg_addr);
	n_hc = mv_get_hc_count(host->ports[0]->flags);
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);
	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);
	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);
			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);	/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		hpriv->ops->phy_errata(hpriv, mmio, port);
	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&ap->ioaddr, port_mmio);
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));
		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);
		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
	if (!hpriv->crqb_pool)
	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
	if (!hpriv->crpb_pool)
	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
	if (!hpriv->sg_tbl_pool)
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;
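		/* window control word: size mask in the upper 16 bits,
		 * mbus attribute, DRAM target id, and the enable bit
		 */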
		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
 * mv_platform_probe - handle a positive probe of an SoC Marvell host
 * @pdev: platform device found
 * Inherited from caller.
static int mv_platform_probe(struct platform_device *pdev)
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
		{ &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
	 * Simple resource validation ..
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
	 * Get the register base first
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
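	/* port and host-controller offsets used throughout this driver
	 * include MV_SATAHC0_REG_BASE (the PCI BAR layout), so bias the
	 * SoC mapping down to make the same offset arithmetic work
	 */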
	hpriv->base -= MV_SATAHC0_REG_BASE;
	 * (Re-)program MBUS remapping windows if we are asked to.
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
 * mv_platform_remove - unplug a platform interface
 * @pdev: platform device
 * A platform bus SATA device has been unplugged.  Perform the needed
 * cleanup.  Also called on module unload for any active devices.
static int __devexit mv_platform_remove(struct platform_device *pdev)
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);
	ata_host_detach(host);
static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
		.owner	= THIS_MODULE,
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);
static struct pci_driver mv_pci_driver = {
	.id_table	= mv_pci_tbl,
	.probe		= mv_pci_init_one,
	.remove		= ata_pci_remove_one,
static int msi;		/* Use PCI msi; either zero (off, default) or non-zero */
/* move to PCI layer or libata core? */
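/* Try a 64-bit DMA mask first; if either the streaming or the consistent
 * mask cannot be set, fall back to 32-bit DMA.
 */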
static int pci_go_64(struct pci_dev *pdev)
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 * FIXME: complete this.
 * Inherited from caller.
static void mv_print_info(struct ata_host *host)
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	const char *scc_s, *gen;
	/* Use this to determine the HW stepping of the chip so we know
	 * which errata to work around
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	else if (scc == 0x01)
	if (IS_GEN_I(hpriv))
	else if (IS_GEN_II(hpriv))
	else if (IS_GEN_IIE(hpriv))
	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 * Inherited from caller.
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;
	/* acquire resources */
	rc = pcim_enable_device(pdev);
	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
		pcim_pin_device(pdev);
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];
	rc = pci_go_64(pdev);
	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	/* Enable interrupts */
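	/* MSI is opt-in via the 'msi' module parameter; otherwise the
	 * device stays on legacy INTx
	 */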
	if (msi && pci_enable_msi(pdev))
	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);
static int __init mv_init(void)
	rc = pci_register_driver(&mv_pci_driver);
	rc = platform_driver_register(&mv_platform_driver);
		pci_unregister_driver(&mv_pci_driver);
static void __exit mv_exit(void)
	pci_unregister_driver(&mv_pci_driver);
	platform_driver_unregister(&mv_platform_driver);
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:sata_mv");
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
module_init(mv_init);
module_exit(mv_exit);