2 * sata_mv.c - Marvell SATA support
4 * Copyright 2008: Marvell Corporation, all rights reserved.
5 * Copyright 2005: EMC Corporation, all rights reserved.
6 * Copyright 2005 Red Hat, Inc. All rights reserved.
8 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 1) Needs a full errata audit for all chipsets. I implemented most
29 of the errata workarounds found in the Marvell vendor driver, but
30 I distinctly remember a couple workarounds (one related to PCI-X) are still needed.
33 2) Improve/fix IRQ and error handling sequences.
35 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
37 4) Think about TCQ support here, and for libata in general
38 with controllers that support it via host-queuing hardware
39 (a software-only implementation could be a nightmare).
41 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
43 6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.
45 7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).
47 8) Develop a low-power-consumption strategy, and implement it.
49 9) [Experiment, low priority] See if ATAPI can be supported using
50 "unknown FIS" or "vendor-specific FIS" support, or something creative
53 10) [Experiment, low priority] Investigate interrupt coalescing.
54 Quite often, especially with PCI Message Signalled Interrupts (MSI),
55 the overhead reduced by interrupt mitigation is not
56 worth the latency cost.
58 11) [Experiment, Marvell value added] Is it possible to use target
59 mode to cross-connect two Linux boxes with Marvell cards? If so,
60 creating LibATA target mode support would be very interesting.
62 Target mode, for those without docs, is the ability to directly
63 connect two SATA controllers.
67 #include <linux/kernel.h>
68 #include <linux/module.h>
69 #include <linux/pci.h>
70 #include <linux/init.h>
71 #include <linux/blkdev.h>
72 #include <linux/delay.h>
73 #include <linux/interrupt.h>
74 #include <linux/dmapool.h>
75 #include <linux/dma-mapping.h>
76 #include <linux/device.h>
77 #include <linux/platform_device.h>
78 #include <linux/ata_platform.h>
79 #include <linux/mbus.h>
80 #include <scsi/scsi_host.h>
81 #include <scsi/scsi_cmnd.h>
82 #include <scsi/scsi_device.h>
83 #include <linux/libata.h>
85 #define DRV_NAME "sata_mv"
86 #define DRV_VERSION "1.20"
89 /* BARs are enumerated in pci_resource_start() terms */
90 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
91 MV_IO_BAR = 2, /* offset 0x18: IO space */
92 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
94 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
95 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
98 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
99 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
100 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
101 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
102 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
103 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
105 MV_SATAHC0_REG_BASE = 0x20000,
106 MV_FLASH_CTL = 0x1046c,
107 MV_GPIO_PORT_CTL = 0x104f0,
108 MV_RESET_CFG = 0x180d8,
110 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
111 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
112 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
113 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
116 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
118 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
119 * CRPB needs alignment on a 256B boundary. Size == 256B
120 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
122 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
123 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
125 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
127 /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
128 MV_PORT_HC_SHIFT = 2,
129 MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
130 /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
131 MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */
134 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
135 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
136 /* SoC integrated controllers, no PCI interface */
137 MV_FLAG_SOC = (1 << 28),
139 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
140 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
141 ATA_FLAG_PIO_POLLING,
142 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
144 CRQB_FLAG_READ = (1 << 0),
146 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
147 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
148 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
149 CRQB_CMD_ADDR_SHIFT = 8,
150 CRQB_CMD_CS = (0x2 << 11),
151 CRQB_CMD_LAST = (1 << 15),
153 CRPB_FLAG_STATUS_SHIFT = 8,
154 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
155 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
157 EPRD_FLAG_END_OF_TBL = (1 << 31),
159 /* PCI interface registers */
161 PCI_COMMAND_OFS = 0xc00,
163 PCI_MAIN_CMD_STS_OFS = 0xd30,
164 STOP_PCI_MASTER = (1 << 2),
165 PCI_MASTER_EMPTY = (1 << 3),
166 GLOB_SFT_RST = (1 << 4),
169 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
170 MV_PCI_DISC_TIMER = 0xd04,
171 MV_PCI_MSI_TRIGGER = 0xc38,
172 MV_PCI_SERR_MASK = 0xc28,
173 MV_PCI_XBAR_TMOUT = 0x1d04,
174 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
175 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
176 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
177 MV_PCI_ERR_COMMAND = 0x1d50,
179 PCI_IRQ_CAUSE_OFS = 0x1d58,
180 PCI_IRQ_MASK_OFS = 0x1d5c,
181 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
183 PCIE_IRQ_CAUSE_OFS = 0x1900,
184 PCIE_IRQ_MASK_OFS = 0x1910,
185 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
187 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
188 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
189 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
190 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
191 ERR_IRQ = (1 << 0), /* shift by port # */
192 DONE_IRQ = (1 << 1), /* shift by port # */
193 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
194 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
196 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
197 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
198 PORTS_0_3_COAL_DONE = (1 << 8),
199 PORTS_4_7_COAL_DONE = (1 << 17),
200 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
201 GPIO_INT = (1 << 22),
202 SELF_INT = (1 << 23),
203 TWSI_INT = (1 << 24),
204 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
205 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
206 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
207 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
208 PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
209 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
211 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
213 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
215 /* SATAHC registers */
218 HC_IRQ_CAUSE_OFS = 0x14,
219 DMA_IRQ = (1 << 0), /* shift by port # */
220 HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
221 DEV_IRQ = (1 << 8), /* shift by port # */
223 /* Shadow block registers */
225 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
228 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
229 SATA_ACTIVE_OFS = 0x350,
230 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
233 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
238 SATA_IFCTL_OFS = 0x344,
239 SATA_IFSTAT_OFS = 0x34c,
240 VENDOR_UNIQUE_FIS_OFS = 0x35c,
243 FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
248 SATA_INTERFACE_CFG = 0x050,
250 MV_M2_PREAMP_MASK = 0x7e0,
254 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
255 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
256 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
257 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
258 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
259 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
260 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
262 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
263 EDMA_ERR_IRQ_MASK_OFS = 0xc,
264 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
265 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
266 EDMA_ERR_DEV = (1 << 2), /* device error */
267 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
268 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
269 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
270 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
271 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
272 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
273 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
274 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
275 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
276 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
277 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
279 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
280 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
281 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
282 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
283 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
285 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
287 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
288 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
289 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
290 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
291 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
292 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
294 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
296 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
297 EDMA_ERR_OVERRUN_5 = (1 << 5),
298 EDMA_ERR_UNDERRUN_5 = (1 << 6),
300 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
301 EDMA_ERR_LNK_CTRL_RX_1 |
302 EDMA_ERR_LNK_CTRL_RX_3 |
303 EDMA_ERR_LNK_CTRL_TX |
304 /* temporary, until we fix hotplug: */
305 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
307 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
317 EDMA_ERR_LNK_CTRL_RX_2 |
318 EDMA_ERR_LNK_DATA_RX |
319 EDMA_ERR_LNK_DATA_TX |
320 EDMA_ERR_TRANS_PROTO,
322 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
327 EDMA_ERR_UNDERRUN_5 |
328 EDMA_ERR_SELF_DIS_5 |
334 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
335 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
337 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
338 EDMA_REQ_Q_PTR_SHIFT = 5,
340 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
341 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
342 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
343 EDMA_RSP_Q_PTR_SHIFT = 3,
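/* (The PTR_SHIFT values are log2 of the entry sizes: CRQB entries are
 * 32 bytes and CRPB entries 8 bytes, so a queue index shifted left by
 * them becomes a byte offset in the IN/OUT pointer registers.)
 */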
345 EDMA_CMD_OFS = 0x28, /* EDMA command register */
346 EDMA_EN = (1 << 0), /* enable EDMA */
347 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
348 ATA_RST = (1 << 2), /* reset trans/link/phy */
350 EDMA_IORDY_TMOUT = 0x34,
353 GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */
355 /* Host private flags (hp_flags) */
356 MV_HP_FLAG_MSI = (1 << 0),
357 MV_HP_ERRATA_50XXB0 = (1 << 1),
358 MV_HP_ERRATA_50XXB2 = (1 << 2),
359 MV_HP_ERRATA_60X1B2 = (1 << 3),
360 MV_HP_ERRATA_60X1C0 = (1 << 4),
361 MV_HP_ERRATA_XX42A0 = (1 << 5),
362 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
363 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
364 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
365 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
367 /* Port private flags (pp_flags) */
368 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
369 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
372 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
373 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
374 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
375 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
377 #define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
378 #define WINDOW_BASE(i) (0x20034 + ((i) << 4))
381 /* DMA boundary 0xffff is required by the s/g splitting
382 * we need on /length/ in mv_fill_sg().
384 MV_DMA_BOUNDARY = 0xffffU,
386 /* mask of register bits containing lower 32 bits
387 * of EDMA request queue DMA address
389 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
391 /* ditto, for response queue */
392 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
406 /* Command ReQuest Block: 32B */
422 /* Command ResPonse Block: 8B */
429 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
437 struct mv_port_priv {
438 struct mv_crqb *crqb;
440 struct mv_crpb *crpb;
442 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
443 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
445 unsigned int req_idx;
446 unsigned int resp_idx;
451 struct mv_port_signal {
456 struct mv_host_priv {
458 struct mv_port_signal signal[8];
459 const struct mv_hw_ops *ops;
462 void __iomem *main_cause_reg_addr;
463 void __iomem *main_mask_reg_addr;
468 * These consistent DMA memory pools give us guaranteed
469 * alignment for hardware-accessed data structures,
470 * and less memory waste in accomplishing the alignment.
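 *
 * Illustrative sketch (an assumption about the setup code, which is not
 * part of this block): such a pool can be created with the element size
 * doubling as the alignment, e.g.
 *	crqb_pool = dmam_pool_create("crqb_q", dev,
 *				     MV_CRQB_Q_SZ, MV_CRQB_Q_SZ, 0);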
472 struct dma_pool *crqb_pool;
473 struct dma_pool *crpb_pool;
474 struct dma_pool *sg_tbl_pool;
478 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
480 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
481 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
483 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
485 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
486 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
489 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
490 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
491 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
492 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
493 static int mv_port_start(struct ata_port *ap);
494 static void mv_port_stop(struct ata_port *ap);
495 static void mv_qc_prep(struct ata_queued_cmd *qc);
496 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
497 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
498 static int mv_hardreset(struct ata_link *link, unsigned int *class,
499 unsigned long deadline);
500 static void mv_eh_freeze(struct ata_port *ap);
501 static void mv_eh_thaw(struct ata_port *ap);
502 static void mv6_dev_config(struct ata_device *dev);
504 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
506 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
507 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
509 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
511 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
512 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
514 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
516 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
517 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
519 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
521 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
522 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
524 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
526 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
527 void __iomem *mmio, unsigned int n_hc);
528 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
530 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
531 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
532 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
533 unsigned int port_no);
534 static int mv_stop_edma(struct ata_port *ap);
535 static int mv_stop_edma_engine(void __iomem *port_mmio);
536 static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
538 static void mv_pmp_select(struct ata_port *ap, int pmp);
539 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
540 unsigned long deadline);
541 static int mv_softreset(struct ata_link *link, unsigned int *class,
542 unsigned long deadline);
544 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
545 * because we have to allow room for worst case splitting of
546 * PRDs for 64K boundaries in mv_fill_sg().
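 * (Worst case is two ePRDs per sg entry, so MV_MAX_SG_CT ePRD slots are
 * enough for MV_MAX_SG_CT / 2 sg entries.)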
548 static struct scsi_host_template mv5_sht = {
549 ATA_BASE_SHT(DRV_NAME),
550 .sg_tablesize = MV_MAX_SG_CT / 2,
551 .dma_boundary = MV_DMA_BOUNDARY,
554 static struct scsi_host_template mv6_sht = {
555 ATA_NCQ_SHT(DRV_NAME),
556 .can_queue = MV_MAX_Q_DEPTH - 1,
557 .sg_tablesize = MV_MAX_SG_CT / 2,
558 .dma_boundary = MV_DMA_BOUNDARY,
561 static struct ata_port_operations mv5_ops = {
562 .inherits = &ata_sff_port_ops,
564 .qc_prep = mv_qc_prep,
565 .qc_issue = mv_qc_issue,
567 .freeze = mv_eh_freeze,
569 .hardreset = mv_hardreset,
570 .error_handler = ata_std_error_handler, /* avoid SFF EH */
571 .post_internal_cmd = ATA_OP_NULL,
573 .scr_read = mv5_scr_read,
574 .scr_write = mv5_scr_write,
576 .port_start = mv_port_start,
577 .port_stop = mv_port_stop,
580 static struct ata_port_operations mv6_ops = {
581 .inherits = &mv5_ops,
582 .qc_defer = sata_pmp_qc_defer_cmd_switch,
583 .dev_config = mv6_dev_config,
584 .scr_read = mv_scr_read,
585 .scr_write = mv_scr_write,
587 .pmp_hardreset = mv_pmp_hardreset,
588 .pmp_softreset = mv_softreset,
589 .softreset = mv_softreset,
590 .error_handler = sata_pmp_error_handler,
593 static struct ata_port_operations mv_iie_ops = {
594 .inherits = &mv6_ops,
595 .qc_defer = ata_std_qc_defer, /* FIS-based switching */
596 .dev_config = ATA_OP_NULL,
597 .qc_prep = mv_qc_prep_iie,
600 static const struct ata_port_info mv_port_info[] = {
602 .flags = MV_COMMON_FLAGS,
603 .pio_mask = 0x1f, /* pio0-4 */
604 .udma_mask = ATA_UDMA6,
605 .port_ops = &mv5_ops,
608 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
609 .pio_mask = 0x1f, /* pio0-4 */
610 .udma_mask = ATA_UDMA6,
611 .port_ops = &mv5_ops,
614 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
615 .pio_mask = 0x1f, /* pio0-4 */
616 .udma_mask = ATA_UDMA6,
617 .port_ops = &mv5_ops,
620 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
621 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
623 .pio_mask = 0x1f, /* pio0-4 */
624 .udma_mask = ATA_UDMA6,
625 .port_ops = &mv6_ops,
628 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
629 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
630 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
631 .pio_mask = 0x1f, /* pio0-4 */
632 .udma_mask = ATA_UDMA6,
633 .port_ops = &mv6_ops,
636 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
637 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
639 .pio_mask = 0x1f, /* pio0-4 */
640 .udma_mask = ATA_UDMA6,
641 .port_ops = &mv_iie_ops,
644 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
645 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
647 .pio_mask = 0x1f, /* pio0-4 */
648 .udma_mask = ATA_UDMA6,
649 .port_ops = &mv_iie_ops,
652 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
653 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
654 ATA_FLAG_NCQ | MV_FLAG_SOC,
655 .pio_mask = 0x1f, /* pio0-4 */
656 .udma_mask = ATA_UDMA6,
657 .port_ops = &mv_iie_ops,
661 static const struct pci_device_id mv_pci_tbl[] = {
662 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
663 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
664 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
665 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
666 /* RocketRAID 1740/174x have different identifiers */
667 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
668 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
670 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
671 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
672 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
673 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
674 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
676 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
679 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
681 /* Marvell 7042 support */
682 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
684 /* Highpoint RocketRAID PCIe series */
685 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
686 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
688 { } /* terminate list */
691 static const struct mv_hw_ops mv5xxx_ops = {
692 .phy_errata = mv5_phy_errata,
693 .enable_leds = mv5_enable_leds,
694 .read_preamp = mv5_read_preamp,
695 .reset_hc = mv5_reset_hc,
696 .reset_flash = mv5_reset_flash,
697 .reset_bus = mv5_reset_bus,
700 static const struct mv_hw_ops mv6xxx_ops = {
701 .phy_errata = mv6_phy_errata,
702 .enable_leds = mv6_enable_leds,
703 .read_preamp = mv6_read_preamp,
704 .reset_hc = mv6_reset_hc,
705 .reset_flash = mv6_reset_flash,
706 .reset_bus = mv_reset_pci_bus,
709 static const struct mv_hw_ops mv_soc_ops = {
710 .phy_errata = mv6_phy_errata,
711 .enable_leds = mv_soc_enable_leds,
712 .read_preamp = mv_soc_read_preamp,
713 .reset_hc = mv_soc_reset_hc,
714 .reset_flash = mv_soc_reset_flash,
715 .reset_bus = mv_soc_reset_bus,
722 static inline void writelfl(unsigned long data, void __iomem *addr)
725 (void) readl(addr); /* flush to avoid PCI posted write */
728 static inline unsigned int mv_hc_from_port(unsigned int port)
730 return port >> MV_PORT_HC_SHIFT;
733 static inline unsigned int mv_hardport_from_port(unsigned int port)
735 return port & MV_PORT_MASK;
739 * Consolidate some rather tricky bit shift calculations.
740 * This is hot-path stuff, so not a function.
741 * Simple code, with two return values, so macro rather than inline.
743 * port is the sole input, in range 0..7.
744 * shift is one output, for use with the main_cause and main_mask registers.
745 * hardport is the other output, in range 0..3
747 * Note that port and hardport may be the same variable in some cases.
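 *
 * Worked example (illustrative): port = 5 gives hardport = 5 & 3 = 1 and
 * shift = (5 >> 2) * HC_SHIFT + 1 * 2 = 9 + 2 = 11.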
749 #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
751 shift = mv_hc_from_port(port) * HC_SHIFT; \
752 hardport = mv_hardport_from_port(port); \
753 shift += hardport * 2; \
756 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
758 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
761 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
764 return mv_hc_base(base, mv_hc_from_port(port));
767 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
769 return mv_hc_base_from_port(base, port) +
770 MV_SATAHC_ARBTR_REG_SZ +
771 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
774 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
776 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
777 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
779 return hc_mmio + ofs;
782 static inline void __iomem *mv_host_base(struct ata_host *host)
784 struct mv_host_priv *hpriv = host->private_data;
788 static inline void __iomem *mv_ap_base(struct ata_port *ap)
790 return mv_port_base(mv_host_base(ap->host), ap->port_no);
793 static inline int mv_get_hc_count(unsigned long port_flags)
795 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
798 static void mv_set_edma_ptrs(void __iomem *port_mmio,
799 struct mv_host_priv *hpriv,
800 struct mv_port_priv *pp)
805 * initialize request queue
807 pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
808 index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
810 WARN_ON(pp->crqb_dma & 0x3ff);
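/* ("(x >> 16) >> 16" extracts the upper 32 bits without a ">> 32", which
 * would be undefined behaviour when dma_addr_t is only 32 bits wide.)
 */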
811 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
812 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
813 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
815 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
816 writelfl((pp->crqb_dma & 0xffffffff) | index,
817 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
819 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
822 * initialize response queue
824 pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
825 index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
827 WARN_ON(pp->crpb_dma & 0xff);
828 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
830 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
831 writelfl((pp->crpb_dma & 0xffffffff) | index,
832 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
834 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
836 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
837 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
841 * mv_start_dma - Enable eDMA engine
842 * @ap: ATA channel to manipulate
 * @port_mmio: port registers base address
843 * @pp: port private data
845 * Verify the local cache of the eDMA state is accurate with a WARN_ON.
849 * Inherited from caller.
851 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
852 struct mv_port_priv *pp, u8 protocol)
854 int want_ncq = (protocol == ATA_PROT_NCQ);
856 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
857 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
858 if (want_ncq != using_ncq)
861 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
862 struct mv_host_priv *hpriv = ap->host->private_data;
863 int hardport = mv_hardport_from_port(ap->port_no);
864 void __iomem *hc_mmio = mv_hc_base_from_port(
865 mv_host_base(ap->host), hardport);
866 u32 hc_irq_cause, ipending;
868 /* clear EDMA event indicators, if any */
869 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
871 /* clear EDMA interrupt indicator, if any */
872 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
873 ipending = (DEV_IRQ | DMA_IRQ) << hardport;
874 if (hc_irq_cause & ipending) {
875 writelfl(hc_irq_cause & ~ipending,
876 hc_mmio + HC_IRQ_CAUSE_OFS);
879 mv_edma_cfg(ap, want_ncq);
881 /* clear FIS IRQ Cause */
882 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
884 mv_set_edma_ptrs(port_mmio, hpriv, pp);
886 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
887 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
892 * mv_stop_edma_engine - Disable eDMA engine
893 * @port_mmio: io base address
896 * Inherited from caller.
898 static int mv_stop_edma_engine(void __iomem *port_mmio)
902 /* Disable eDMA. The disable bit auto clears. */
903 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
905 /* Wait for the chip to confirm eDMA is off. */
906 for (i = 10000; i > 0; i--) {
907 u32 reg = readl(port_mmio + EDMA_CMD_OFS);
908 if (!(reg & EDMA_EN))
915 static int mv_stop_edma(struct ata_port *ap)
917 void __iomem *port_mmio = mv_ap_base(ap);
918 struct mv_port_priv *pp = ap->private_data;
920 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
922 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
923 if (mv_stop_edma_engine(port_mmio)) {
924 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
931 static void mv_dump_mem(void __iomem *start, unsigned bytes)
934 for (b = 0; b < bytes; ) {
935 DPRINTK("%p: ", start + b);
936 for (w = 0; b < bytes && w < 4; w++) {
937 printk("%08x ", readl(start + b));
945 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
950 for (b = 0; b < bytes; ) {
951 DPRINTK("%02x: ", b);
952 for (w = 0; b < bytes && w < 4; w++) {
953 (void) pci_read_config_dword(pdev, b, &dw);
961 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
962 struct pci_dev *pdev)
965 void __iomem *hc_base = mv_hc_base(mmio_base,
966 port >> MV_PORT_HC_SHIFT);
967 void __iomem *port_base;
968 int start_port, num_ports, p, start_hc, num_hcs, hc;
971 start_hc = start_port = 0;
972 num_ports = 8; /* should be benign for 4-port devices */
975 start_hc = port >> MV_PORT_HC_SHIFT;
977 num_ports = num_hcs = 1;
979 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
980 num_ports > 1 ? num_ports - 1 : start_port);
983 DPRINTK("PCI config space regs:\n");
984 mv_dump_pci_cfg(pdev, 0x68);
986 DPRINTK("PCI regs:\n");
987 mv_dump_mem(mmio_base+0xc00, 0x3c);
988 mv_dump_mem(mmio_base+0xd00, 0x34);
989 mv_dump_mem(mmio_base+0xf00, 0x4);
990 mv_dump_mem(mmio_base+0x1d00, 0x6c);
991 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
992 hc_base = mv_hc_base(mmio_base, hc);
993 DPRINTK("HC regs (HC %i):\n", hc);
994 mv_dump_mem(hc_base, 0x1c);
996 for (p = start_port; p < start_port + num_ports; p++) {
997 port_base = mv_port_base(mmio_base, p);
998 DPRINTK("EDMA regs (port %i):\n", p);
999 mv_dump_mem(port_base, 0x54);
1000 DPRINTK("SATA regs (port %i):\n", p);
1001 mv_dump_mem(port_base+0x300, 0x60);
1006 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1010 switch (sc_reg_in) {
1014 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1017 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1026 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1028 unsigned int ofs = mv_scr_offset(sc_reg_in);
1030 if (ofs != 0xffffffffU) {
1031 *val = readl(mv_ap_base(ap) + ofs);
1037 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1039 unsigned int ofs = mv_scr_offset(sc_reg_in);
1041 if (ofs != 0xffffffffU) {
1042 writelfl(val, mv_ap_base(ap) + ofs);
1048 static void mv6_dev_config(struct ata_device *adev)
1051 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1053 * Gen-II does not support NCQ over a port multiplier
1054 * (no FIS-based switching).
1056 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1057 * See mv_qc_prep() for more info.
1059 if (adev->flags & ATA_DFLAG_NCQ) {
1060 if (sata_pmp_attached(adev->link->ap)) {
1061 adev->flags &= ~ATA_DFLAG_NCQ;
1062 ata_dev_printk(adev, KERN_INFO,
1063 "NCQ disabled for command-based switching\n");
1064 } else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
1065 adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
1066 ata_dev_printk(adev, KERN_INFO,
1067 "max_sectors limited to %u for NCQ\n",
1073 static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
1075 u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
1077 * Various bit settings required for operation
1078 * in FIS-based switching (fbs) mode on GenIIe:
1080 old_fcfg = readl(port_mmio + FIS_CFG_OFS);
1081 old_ltmode = readl(port_mmio + LTMODE_OFS);
1083 new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC;
1084 new_ltmode = old_ltmode | LTMODE_BIT8;
1085 } else { /* disable fbs */
1086 new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC;
1087 new_ltmode = old_ltmode & ~LTMODE_BIT8;
1089 if (new_fcfg != old_fcfg)
1090 writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
1091 if (new_ltmode != old_ltmode)
1092 writelfl(new_ltmode, port_mmio + LTMODE_OFS);
1095 static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
1098 struct mv_port_priv *pp = ap->private_data;
1099 struct mv_host_priv *hpriv = ap->host->private_data;
1100 void __iomem *port_mmio = mv_ap_base(ap);
1102 /* set up non-NCQ EDMA configuration */
1103 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1105 if (IS_GEN_I(hpriv))
1106 cfg |= (1 << 8); /* enab config burst size mask */
1108 else if (IS_GEN_II(hpriv))
1109 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1111 else if (IS_GEN_IIE(hpriv)) {
1112 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1113 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1114 cfg |= (1 << 18); /* enab early completion */
1115 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1117 if (want_ncq && sata_pmp_attached(ap)) {
1118 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1119 mv_config_fbs(port_mmio, 1);
1121 mv_config_fbs(port_mmio, 0);
1126 cfg |= EDMA_CFG_NCQ;
1127 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1129 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1131 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1134 static void mv_port_free_dma_mem(struct ata_port *ap)
1136 struct mv_host_priv *hpriv = ap->host->private_data;
1137 struct mv_port_priv *pp = ap->private_data;
1141 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1145 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1149 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1150 * For later hardware, we have one unique sg_tbl per NCQ tag.
1152 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1153 if (pp->sg_tbl[tag]) {
1154 if (tag == 0 || !IS_GEN_I(hpriv))
1155 dma_pool_free(hpriv->sg_tbl_pool,
1157 pp->sg_tbl_dma[tag]);
1158 pp->sg_tbl[tag] = NULL;
1164 * mv_port_start - Port specific init/start routine.
1165 * @ap: ATA channel to manipulate
1167 * Allocate and point to DMA memory, init port private memory,
1171 * Inherited from caller.
1173 static int mv_port_start(struct ata_port *ap)
1175 struct device *dev = ap->host->dev;
1176 struct mv_host_priv *hpriv = ap->host->private_data;
1177 struct mv_port_priv *pp;
1180 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1183 ap->private_data = pp;
1185 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1188 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1190 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1192 goto out_port_free_dma_mem;
1193 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1196 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1197 * For later hardware, we need one unique sg_tbl per NCQ tag.
1199 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1200 if (tag == 0 || !IS_GEN_I(hpriv)) {
1201 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1202 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1203 if (!pp->sg_tbl[tag])
1204 goto out_port_free_dma_mem;
1206 pp->sg_tbl[tag] = pp->sg_tbl[0];
1207 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1212 out_port_free_dma_mem:
1213 mv_port_free_dma_mem(ap);
1218 * mv_port_stop - Port specific cleanup/stop routine.
1219 * @ap: ATA channel to manipulate
1221 * Stop DMA, cleanup port memory.
1224 * This routine uses the host lock to protect the DMA stop.
1226 static void mv_port_stop(struct ata_port *ap)
1229 mv_port_free_dma_mem(ap);
1233 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1234 * @qc: queued command whose SG list to source from
1236 * Populate the SG list and mark the last entry.
1239 * Inherited from caller.
1241 static void mv_fill_sg(struct ata_queued_cmd *qc)
1243 struct mv_port_priv *pp = qc->ap->private_data;
1244 struct scatterlist *sg;
1245 struct mv_sg *mv_sg, *last_sg = NULL;
1248 mv_sg = pp->sg_tbl[qc->tag];
1249 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1250 dma_addr_t addr = sg_dma_address(sg);
1251 u32 sg_len = sg_dma_len(sg);
1254 u32 offset = addr & 0xffff;
1257 if (offset + sg_len > 0x10000)
1258 len = 0x10000 - offset;
1260 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1261 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1262 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1272 if (likely(last_sg))
1273 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1276 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1278 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1279 (last ? CRQB_CMD_LAST : 0);
1280 *cmdw = cpu_to_le16(tmp);
1284 * mv_qc_prep - Host specific command preparation.
1285 * @qc: queued command to prepare
1287 * This routine simply redirects to the general purpose routine
1288 * if command is not DMA. Else, it handles prep of the CRQB
1289 * (command request block), does some sanity checking, and calls
1290 * the SG load routine.
1293 * Inherited from caller.
1295 static void mv_qc_prep(struct ata_queued_cmd *qc)
1297 struct ata_port *ap = qc->ap;
1298 struct mv_port_priv *pp = ap->private_data;
1300 struct ata_taskfile *tf;
1304 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1305 (qc->tf.protocol != ATA_PROT_NCQ))
1308 /* Fill in command request block
1310 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1311 flags |= CRQB_FLAG_READ;
1312 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1313 flags |= qc->tag << CRQB_TAG_SHIFT;
1314 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
1316 /* get current queue index from software */
1317 in_index = pp->req_idx;
1319 pp->crqb[in_index].sg_addr =
1320 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1321 pp->crqb[in_index].sg_addr_hi =
1322 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1323 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1325 cw = &pp->crqb[in_index].ata_cmd[0];
1328 /* Sadly, the CRQB cannot accommodate all registers--there are
1329 * only 11 bytes...so we must pick and choose required
1330 * registers based on the command. So, we drop feature and
1331 * hob_feature for [RW] DMA commands, but they are needed for
1332 * NCQ. NCQ will drop hob_nsect.
1334 switch (tf->command) {
1336 case ATA_CMD_READ_EXT:
1338 case ATA_CMD_WRITE_EXT:
1339 case ATA_CMD_WRITE_FUA_EXT:
1340 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1342 case ATA_CMD_FPDMA_READ:
1343 case ATA_CMD_FPDMA_WRITE:
1344 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1345 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1348 /* The only other commands EDMA supports in non-queued and
1349 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1350 * of which are defined/used by Linux. If we get here, this
1351 * driver needs work.
1353 * FIXME: modify libata to give qc_prep a return value and
1354 * return error here.
1356 BUG_ON(tf->command);
1359 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1360 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1361 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1362 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1363 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1364 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1365 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1366 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1367 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1369 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1375 * mv_qc_prep_iie - Host specific command preparation.
1376 * @qc: queued command to prepare
1378 * This routine simply redirects to the general purpose routine
1379 * if command is not DMA. Else, it handles prep of the CRQB
1380 * (command request block), does some sanity checking, and calls
1381 * the SG load routine.
1384 * Inherited from caller.
1386 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1388 struct ata_port *ap = qc->ap;
1389 struct mv_port_priv *pp = ap->private_data;
1390 struct mv_crqb_iie *crqb;
1391 struct ata_taskfile *tf;
1395 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1396 (qc->tf.protocol != ATA_PROT_NCQ))
1399 /* Fill in Gen IIE command request block */
1400 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1401 flags |= CRQB_FLAG_READ;
1403 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1404 flags |= qc->tag << CRQB_TAG_SHIFT;
1405 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1406 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
1408 /* get current queue index from software */
1409 in_index = pp->req_idx;
1411 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1412 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1413 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1414 crqb->flags = cpu_to_le32(flags);
1417 crqb->ata_cmd[0] = cpu_to_le32(
1418 (tf->command << 16) |
1421 crqb->ata_cmd[1] = cpu_to_le32(
1427 crqb->ata_cmd[2] = cpu_to_le32(
1428 (tf->hob_lbal << 0) |
1429 (tf->hob_lbam << 8) |
1430 (tf->hob_lbah << 16) |
1431 (tf->hob_feature << 24)
1433 crqb->ata_cmd[3] = cpu_to_le32(
1435 (tf->hob_nsect << 8)
1438 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1444 * mv_qc_issue - Initiate a command to the host
1445 * @qc: queued command to start
1447 * This routine simply redirects to the general purpose routine
1448 * if command is not DMA. Else, it sanity checks our local
1449 * caches of the request producer/consumer indices then enables
1450 * DMA and bumps the request producer index.
1453 * Inherited from caller.
1455 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1457 struct ata_port *ap = qc->ap;
1458 void __iomem *port_mmio = mv_ap_base(ap);
1459 struct mv_port_priv *pp = ap->private_data;
1462 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1463 (qc->tf.protocol != ATA_PROT_NCQ)) {
1465 * We're about to send a non-EDMA capable command to the
1466 * port. Turn off EDMA so there won't be problems accessing
1467 * the shadow block, etc. registers.
1470 mv_pmp_select(ap, qc->dev->link->pmp);
1471 return ata_sff_qc_issue(qc);
1474 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1476 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
1477 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
1479 /* and write the request in pointer to kick the EDMA to life */
1480 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1481 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1486 static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
1488 struct mv_port_priv *pp = ap->private_data;
1489 struct ata_queued_cmd *qc;
1491 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1493 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1494 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1499 static void mv_unexpected_intr(struct ata_port *ap)
1501 struct mv_port_priv *pp = ap->private_data;
1502 struct ata_eh_info *ehi = &ap->link.eh_info;
1506 * We got a device interrupt from something that
1507 * was supposed to be using EDMA or polling.
1509 ata_ehi_clear_desc(ehi);
1510 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1511 when = " while EDMA enabled";
1513 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
1514 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1515 when = " while polling";
1517 ata_ehi_push_desc(ehi, "unexpected device interrupt%s", when);
1518 ehi->err_mask |= AC_ERR_OTHER;
1519 ehi->action |= ATA_EH_RESET;
1520 ata_port_freeze(ap);
1524 * mv_err_intr - Handle error interrupts on the port
1525 * @ap: ATA channel to manipulate
1526 * @qc: affected queued command, or NULL if none
1528 * In most cases, just clear the interrupt and move on. However,
1529 * some cases require an eDMA reset, which also performs a COMRESET.
1530 * The SERR case requires a clear of pending errors in the SATA
1531 * SERROR register. Finally, if the port disabled DMA,
1532 * update our cached copy to match.
1535 * Inherited from caller.
1537 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1539 void __iomem *port_mmio = mv_ap_base(ap);
1540 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1541 struct mv_port_priv *pp = ap->private_data;
1542 struct mv_host_priv *hpriv = ap->host->private_data;
1543 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1544 unsigned int action = 0, err_mask = 0;
1545 struct ata_eh_info *ehi = &ap->link.eh_info;
1547 ata_ehi_clear_desc(ehi);
1549 if (!edma_enabled) {
1550 /* just a guess: do we need to do this? should we
1551 * expand this, and do it in all cases?
1553 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1554 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1557 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1559 ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause);
1562 * All generations share these EDMA error cause bits:
1564 if (edma_err_cause & EDMA_ERR_DEV)
1565 err_mask |= AC_ERR_DEV;
1566 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1567 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1568 EDMA_ERR_INTRL_PAR)) {
1569 err_mask |= AC_ERR_ATA_BUS;
1570 action |= ATA_EH_RESET;
1571 ata_ehi_push_desc(ehi, "parity error");
1573 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1574 ata_ehi_hotplugged(ehi);
1575 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1576 "dev disconnect" : "dev connect");
1577 action |= ATA_EH_RESET;
1581 * Gen-I has a different SELF_DIS bit,
1582 * different FREEZE bits, and no SERR bit:
1584 if (IS_GEN_I(hpriv)) {
1585 eh_freeze_mask = EDMA_EH_FREEZE_5;
1586 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1587 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1588 ata_ehi_push_desc(ehi, "EDMA self-disable");
1591 eh_freeze_mask = EDMA_EH_FREEZE;
1592 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1593 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1594 ata_ehi_push_desc(ehi, "EDMA self-disable");
1596 if (edma_err_cause & EDMA_ERR_SERR) {
1597 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1598 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1599 err_mask = AC_ERR_ATA_BUS;
1600 action |= ATA_EH_RESET;
1604 /* Clear EDMA now that SERR cleanup done */
1605 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1608 err_mask = AC_ERR_OTHER;
1609 action |= ATA_EH_RESET;
1612 ehi->serror |= serr;
1613 ehi->action |= action;
1616 qc->err_mask |= err_mask;
1618 ehi->err_mask |= err_mask;
1620 if (edma_err_cause & eh_freeze_mask)
1621 ata_port_freeze(ap);
1626 static void mv_process_crpb_response(struct ata_port *ap,
1627 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
1629 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
1633 u16 edma_status = le16_to_cpu(response->flags);
1635 * edma_status from a response queue entry:
1636 * LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
1637 * MSB is saved ATA status from command completion.
1640 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
1643 * Error will be seen/handled by mv_err_intr().
1644 * So do nothing at all here.
1649 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
1650 qc->err_mask |= ac_err_mask(ata_status);
1651 ata_qc_complete(qc);
1653 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
1658 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
1660 void __iomem *port_mmio = mv_ap_base(ap);
1661 struct mv_host_priv *hpriv = ap->host->private_data;
1663 bool work_done = false;
1664 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
1666 /* Get the hardware queue position index */
1667 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1668 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1670 /* Process new responses since the last time we looked */
1671 while (in_index != pp->resp_idx) {
1673 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
1675 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
1677 if (IS_GEN_I(hpriv)) {
1678 /* 50xx: no NCQ, only one command active at a time */
1679 tag = ap->link.active_tag;
1681 /* Gen II/IIE: get command tag from CRPB entry */
1682 tag = le16_to_cpu(response->id) & 0x1f;
1684 mv_process_crpb_response(ap, response, tag, ncq_enabled);
1688 /* Update the software queue position index in hardware */
1690 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1691 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
1692 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1696 * mv_host_intr - Handle all interrupts on the given host controller
1697 * @host: host specific structure
1698 * @main_cause: Main interrupt cause register for the chip.
1701 * Inherited from caller.
1703 static int mv_host_intr(struct ata_host *host, u32 main_cause)
1705 struct mv_host_priv *hpriv = host->private_data;
1706 void __iomem *mmio = hpriv->base, *hc_mmio = NULL;
1707 u32 hc_irq_cause = 0;
1708 unsigned int handled = 0, port;
1710 for (port = 0; port < hpriv->n_ports; port++) {
1711 struct ata_port *ap = host->ports[port];
1712 struct mv_port_priv *pp;
1713 unsigned int shift, hardport, port_cause;
1715 * When we move to the second hc, flag our cached
1716 * copies of hc_mmio (and hc_irq_cause) as invalid again.
1718 if (port == MV_PORTS_PER_HC)
1721 * Do nothing if port is not interrupting or is disabled:
1723 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
1724 port_cause = (main_cause >> shift) & (DONE_IRQ | ERR_IRQ);
1725 if (!port_cause || !ap || (ap->flags & ATA_FLAG_DISABLED))
1728 * Each hc within the host has its own hc_irq_cause register.
1729 * We defer reading it until we know we need it, right now:
1731 * FIXME later: we don't really need to read this register
1732 * (some logic changes required below if we go that way),
1733 * because it doesn't tell us anything new. But we do need
1734 * to write to it, outside the top of this loop,
1735 * to reset the interrupt triggers for next time.
1738 hc_mmio = mv_hc_base_from_port(mmio, port);
1739 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1740 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1744 * Process completed CRPB response(s) before other events.
1746 pp = ap->private_data;
1747 if (hc_irq_cause & (DMA_IRQ << hardport)) {
1748 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN)
1749 mv_process_crpb_entries(ap, pp);
1752 * Handle chip-reported errors, or continue on to handle PIO.
1754 if (unlikely(port_cause & ERR_IRQ)) {
1755 mv_err_intr(ap, mv_get_active_qc(ap));
1756 } else if (hc_irq_cause & (DEV_IRQ << hardport)) {
1757 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
1758 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
1760 ata_sff_host_intr(ap, qc);
1764 mv_unexpected_intr(ap);
1770 static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
1772 struct mv_host_priv *hpriv = host->private_data;
1773 struct ata_port *ap;
1774 struct ata_queued_cmd *qc;
1775 struct ata_eh_info *ehi;
1776 unsigned int i, err_mask, printed = 0;
1779 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1781 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1784 DPRINTK("All regs @ PCI error\n");
1785 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1787 writelfl(0, mmio + hpriv->irq_cause_ofs);
1789 for (i = 0; i < host->n_ports; i++) {
1790 ap = host->ports[i];
1791 if (!ata_link_offline(&ap->link)) {
1792 ehi = &ap->link.eh_info;
1793 ata_ehi_clear_desc(ehi);
1795 ata_ehi_push_desc(ehi,
1796 "PCI err cause 0x%08x", err_cause);
1797 err_mask = AC_ERR_HOST_BUS;
1798 ehi->action = ATA_EH_RESET;
1799 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1801 qc->err_mask |= err_mask;
1803 ehi->err_mask |= err_mask;
1805 ata_port_freeze(ap);
1808 return 1; /* handled */
1812 * mv_interrupt - Main interrupt event handler
1814 * @dev_instance: private data; in this case the host structure
1816 * Read the read only register to determine if any host
1817 * controllers have pending interrupts. If so, call lower level
1818 routine to handle. Also check for PCI errors, which are only reported here.
1822 * This routine holds the host lock while processing pending interrupts.
1825 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1827 struct ata_host *host = dev_instance;
1828 struct mv_host_priv *hpriv = host->private_data;
1829 unsigned int handled = 0;
1830 u32 main_cause, main_mask;
1832 spin_lock(&host->lock);
1833 main_cause = readl(hpriv->main_cause_reg_addr);
1834 main_mask = readl(hpriv->main_mask_reg_addr);
1836 * Deal with cases where we either have nothing pending, or have read
1837 * a bogus register value which can indicate HW removal or PCI fault.
1839 if ((main_cause & main_mask) && (main_cause != 0xffffffffU)) {
1840 if (unlikely((main_cause & PCI_ERR) && HAS_PCI(host)))
1841 handled = mv_pci_error(host, hpriv->base);
1843 handled = mv_host_intr(host, main_cause);
1845 spin_unlock(&host->lock);
1846 return IRQ_RETVAL(handled);
1849 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1853 switch (sc_reg_in) {
1857 ofs = sc_reg_in * sizeof(u32);
1866 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1868 struct mv_host_priv *hpriv = ap->host->private_data;
1869 void __iomem *mmio = hpriv->base;
1870 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1871 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1873 if (ofs != 0xffffffffU) {
1874 *val = readl(addr + ofs);
1880 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1882 struct mv_host_priv *hpriv = ap->host->private_data;
1883 void __iomem *mmio = hpriv->base;
1884 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1885 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1887 if (ofs != 0xffffffffU) {
1888 writelfl(val, addr + ofs);
1894 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1896 struct pci_dev *pdev = to_pci_dev(host->dev);
1899 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1902 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1904 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1907 mv_reset_pci_bus(host, mmio);
1910 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1912 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1915 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1918 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1921 tmp = readl(phy_mmio + MV5_PHY_MODE);
1923 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1924 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1927 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1931 writel(0, mmio + MV_GPIO_PORT_CTL);
1933 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1935 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1937 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1940 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1943 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1944 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1946 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1949 tmp = readl(phy_mmio + MV5_LT_MODE);
1951 writel(tmp, phy_mmio + MV5_LT_MODE);
1953 tmp = readl(phy_mmio + MV5_PHY_CTL);
1956 writel(tmp, phy_mmio + MV5_PHY_CTL);
1959 tmp = readl(phy_mmio + MV5_PHY_MODE);
1961 tmp |= hpriv->signal[port].pre;
1962 tmp |= hpriv->signal[port].amps;
1963 writel(tmp, phy_mmio + MV5_PHY_MODE);
1968 #define ZERO(reg) writel(0, port_mmio + (reg))
1969 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1972 void __iomem *port_mmio = mv_port_base(mmio, port);
1975 * The datasheet warns against setting ATA_RST when EDMA is active
1976 * (but doesn't say what the problem might be). So we first try
1977 * to disable the EDMA engine before doing the ATA_RST operation.
1979 mv_reset_channel(hpriv, mmio, port);
1981 ZERO(0x028); /* command */
1982 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1983 ZERO(0x004); /* timer */
1984 ZERO(0x008); /* irq err cause */
1985 ZERO(0x00c); /* irq err mask */
1986 ZERO(0x010); /* rq bah */
1987 ZERO(0x014); /* rq inp */
1988 ZERO(0x018); /* rq outp */
1989 ZERO(0x01c); /* respq bah */
1990 ZERO(0x024); /* respq outp */
1991 ZERO(0x020); /* respq inp */
1992 ZERO(0x02c); /* test control */
1993 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1997 #define ZERO(reg) writel(0, hc_mmio + (reg))
1998 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2001 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2009 tmp = readl(hc_mmio + 0x20);
2012 writel(tmp, hc_mmio + 0x20);
2016 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2019 unsigned int hc, port;
2021 for (hc = 0; hc < n_hc; hc++) {
2022 for (port = 0; port < MV_PORTS_PER_HC; port++)
2023 mv5_reset_hc_port(hpriv, mmio,
2024 (hc * MV_PORTS_PER_HC) + port);
2026 mv5_reset_one_hc(hpriv, mmio, hc);
2033 #define ZERO(reg) writel(0, mmio + (reg))
2034 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2036 struct mv_host_priv *hpriv = host->private_data;
2039 tmp = readl(mmio + MV_PCI_MODE);
2041 writel(tmp, mmio + MV_PCI_MODE);
2043 ZERO(MV_PCI_DISC_TIMER);
2044 ZERO(MV_PCI_MSI_TRIGGER);
2045 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2046 ZERO(HC_MAIN_IRQ_MASK_OFS);
2047 ZERO(MV_PCI_SERR_MASK);
2048 ZERO(hpriv->irq_cause_ofs);
2049 ZERO(hpriv->irq_mask_ofs);
2050 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2051 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2052 ZERO(MV_PCI_ERR_ATTRIBUTE);
2053 ZERO(MV_PCI_ERR_COMMAND);
2057 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2061 mv5_reset_flash(hpriv, mmio);
2063 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2065 tmp |= (1 << 5) | (1 << 6);
2066 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2070 * mv6_reset_hc - Perform the 6xxx global soft reset
2071 * @mmio: base address of the HBA
2073 * This routine only applies to 6xxx parts.
2076 * Inherited from caller.
2078 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2081 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2085 /* Following procedure defined in PCI "main command and status
2089 writel(t | STOP_PCI_MASTER, reg);
2091 for (i = 0; i < 1000; i++) {
2094 if (PCI_MASTER_EMPTY & t)
2097 if (!(PCI_MASTER_EMPTY & t)) {
2098 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2106 writel(t | GLOB_SFT_RST, reg);
2109 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2111 if (!(GLOB_SFT_RST & t)) {
2112 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2117 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2120 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2123 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2125 if (GLOB_SFT_RST & t) {
2126 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2130 * Temporary: wait 3 seconds before port-probing can happen,
2131 * so that we don't miss finding sleepy SilXXXX port-multipliers.
2132 * This can go away once hotplug is fully/correctly implemented.
2140 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2143 void __iomem *port_mmio;
2146 tmp = readl(mmio + MV_RESET_CFG);
2147 if ((tmp & (1 << 0)) == 0) {
2148 hpriv->signal[idx].amps = 0x7 << 8;
2149 hpriv->signal[idx].pre = 0x1 << 5;
2153 port_mmio = mv_port_base(mmio, idx);
2154 tmp = readl(port_mmio + PHY_MODE2);
2156 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2157 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2160 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2162 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2165 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2168 void __iomem *port_mmio = mv_port_base(mmio, port);
2170 u32 hp_flags = hpriv->hp_flags;
2172 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2174 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2177 if (fix_phy_mode2) {
2178 m2 = readl(port_mmio + PHY_MODE2);
2181 writel(m2, port_mmio + PHY_MODE2);
2185 m2 = readl(port_mmio + PHY_MODE2);
2186 m2 &= ~((1 << 16) | (1 << 31));
2187 writel(m2, port_mmio + PHY_MODE2);
2192 /* who knows what this magic does */
2193 tmp = readl(port_mmio + PHY_MODE3);
2196 writel(tmp, port_mmio + PHY_MODE3);
2198 if (fix_phy_mode4) {
2201 m4 = readl(port_mmio + PHY_MODE4);
2203 if (hp_flags & MV_HP_ERRATA_60X1B2)
2204 tmp = readl(port_mmio + PHY_MODE3);
2206 /* workaround for errata FEr SATA#10 (part 1) */
2207 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2209 writel(m4, port_mmio + PHY_MODE4);
2211 if (hp_flags & MV_HP_ERRATA_60X1B2)
2212 writel(tmp, port_mmio + PHY_MODE3);
2215 /* Revert values of pre-emphasis and signal amps to the saved ones */
2216 m2 = readl(port_mmio + PHY_MODE2);
2218 m2 &= ~MV_M2_PREAMP_MASK;
2219 m2 |= hpriv->signal[port].amps;
2220 m2 |= hpriv->signal[port].pre;
2223 /* according to mvSata 3.6.1, some IIE values are fixed */
2224 if (IS_GEN_IIE(hpriv)) {
2229 writel(m2, port_mmio + PHY_MODE2);
2232 /* TODO: use the generic LED interface to configure the SATA Presence */
2233 /* & Activity LEDs on the board */
2234 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2240 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2243 void __iomem *port_mmio;
2246 port_mmio = mv_port_base(mmio, idx);
2247 tmp = readl(port_mmio + PHY_MODE2);
2249 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2250 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
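/*
 * Illustrative sketch (not part of the driver build): mv6_read_preamp() and
 * mv_soc_read_preamp() both keep the amplitude (bits 10:8) and pre-emphasis
 * (bits 7:5) fields of PHY_MODE2 in place, so mv6_phy_errata() can later
 * OR them straight back into the register.  The value below is an assumed
 * sample, not read from hardware:
 */
#include <stdint.h>

static void preamp_field_example(void)
{
	uint32_t phy_mode2 = 0x000002e0;	/* assumed sample value */
	uint32_t amps = phy_mode2 & 0x700;	/* bits 10:8, kept in place */
	uint32_t pre  = phy_mode2 & 0xe0;	/* bits 7:5, kept in place */

	(void)amps;				/* 0x200 for this sample */
	(void)pre;				/* 0xe0 for this sample */
}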
2254 #define ZERO(reg) writel(0, port_mmio + (reg))
2255 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2256 void __iomem *mmio, unsigned int port)
2258 void __iomem *port_mmio = mv_port_base(mmio, port);
2261 * The datasheet warns against setting ATA_RST when EDMA is active
2262 * (but doesn't say what the problem might be). So we first try
2263 * to disable the EDMA engine before doing the ATA_RST operation.
2265 mv_reset_channel(hpriv, mmio, port);
2267 ZERO(0x028); /* command */
2268 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2269 ZERO(0x004); /* timer */
2270 ZERO(0x008); /* irq err cause */
2271 ZERO(0x00c); /* irq err mask */
2272 ZERO(0x010); /* rq bah */
2273 ZERO(0x014); /* rq inp */
2274 ZERO(0x018); /* rq outp */
2275 ZERO(0x01c); /* respq bah */
2276 ZERO(0x024); /* respq outp */
2277 ZERO(0x020); /* respq inp */
2278 ZERO(0x02c); /* test control */
2279 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2284 #define ZERO(reg) writel(0, hc_mmio + (reg))
2285 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2288 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2298 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2299 void __iomem *mmio, unsigned int n_hc)
2303 for (port = 0; port < hpriv->n_ports; port++)
2304 mv_soc_reset_hc_port(hpriv, mmio, port);
2306 mv_soc_reset_one_hc(hpriv, mmio);
2311 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2317 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2322 static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2324 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2326 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
2328 ifctl |= (1 << 7); /* enable gen2i speed */
2329 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
2333 * Caller must ensure that EDMA is not active,
2334 * by first doing mv_stop_edma() where needed.
2336 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
2337 unsigned int port_no)
2339 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2341 mv_stop_edma_engine(port_mmio);
2342 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2344 if (!IS_GEN_I(hpriv)) {
2345 /* Enable 3.0 Gb/s link speed */
2346 mv_setup_ifctl(port_mmio, 1);
2349 * Strobing ATA_RST here causes a hard reset of the SATA transport,
2350 * link, and physical layers. It resets all SATA interface registers
2351 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
2353 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2354 udelay(25); /* allow reset propagation */
2355 writelfl(0, port_mmio + EDMA_CMD_OFS);
2357 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2359 if (IS_GEN_I(hpriv))
2363 static void mv_pmp_select(struct ata_port *ap, int pmp)
2365 if (sata_pmp_supported(ap)) {
2366 void __iomem *port_mmio = mv_ap_base(ap);
2367 u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
2368 int old = reg & 0xf;
2371 reg = (reg & ~0xf) | pmp;
2372 writelfl(reg, port_mmio + SATA_IFCTL_OFS);
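/*
 * Illustrative sketch (not part of the driver build): mv_pmp_select() above
 * replaces only the low nibble of SATA_IFCTL with the desired port
 * multiplier number, leaving the rest of the register untouched:
 */
#include <stdint.h>

static uint32_t set_pmp_field(uint32_t ifctl, unsigned int pmp)
{
	return (ifctl & ~0xfu) | (pmp & 0xf);	/* keep bits 31:4, set bits 3:0 */
}
/* e.g. set_pmp_field(0x12345678, 2) == 0x12345672 */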
2377 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
2378 unsigned long deadline)
2380 mv_pmp_select(link->ap, sata_srst_pmp(link));
2381 return sata_std_hardreset(link, class, deadline);
2384 static int mv_softreset(struct ata_link *link, unsigned int *class,
2385 unsigned long deadline)
2387 mv_pmp_select(link->ap, sata_srst_pmp(link));
2388 return ata_sff_softreset(link, class, deadline);
2391 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2392 unsigned long deadline)
2394 struct ata_port *ap = link->ap;
2395 struct mv_host_priv *hpriv = ap->host->private_data;
2396 struct mv_port_priv *pp = ap->private_data;
2397 void __iomem *mmio = hpriv->base;
2398 int rc, attempts = 0, extra = 0;
2402 mv_reset_channel(hpriv, mmio, ap->port_no);
2403 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2405 /* Workaround for errata FEr SATA#10 (part 2) */
2407 const unsigned long *timing =
2408 sata_ehc_deb_timing(&link->eh_context);
2410 rc = sata_link_hardreset(link, timing, deadline + extra,
2414 sata_scr_read(link, SCR_STATUS, &sstatus);
2415 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2416 /* Force 1.5 Gb/s link speed and try again */
2417 mv_setup_ifctl(mv_ap_base(ap), 0);
2418 if (time_after(jiffies + HZ, deadline))
2419 extra = HZ; /* only extend it once, max */
2421 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
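/*
 * For reference: the SStatus values tested above follow the standard SATA
 * layout, with DET in bits 3:0, SPD in bits 7:4 and IPM in bits 11:8.  A
 * small decode sketch (not part of the driver build); e.g. 0x113 is
 * "device present, Gen1, interface active" while 0x121 is "device detected
 * but no phy communication", which is why the loop above retries at a
 * forced 1.5 Gb/s:
 */
#include <stdint.h>

struct sstatus_fields {
	unsigned int det;	/* bits 3:0  - detection / phy state */
	unsigned int spd;	/* bits 7:4  - negotiated speed (1 = Gen1, 2 = Gen2) */
	unsigned int ipm;	/* bits 11:8 - interface power management state */
};

static struct sstatus_fields decode_sstatus(uint32_t sstatus)
{
	struct sstatus_fields f = {
		.det = sstatus & 0xf,
		.spd = (sstatus >> 4) & 0xf,
		.ipm = (sstatus >> 8) & 0xf,
	};

	return f;
}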
2426 static void mv_eh_freeze(struct ata_port *ap)
2428 struct mv_host_priv *hpriv = ap->host->private_data;
2429 unsigned int shift, hardport, port = ap->port_no;
2432 /* FIXME: handle coalescing completion events properly */
2435 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2437 /* disable assertion of portN err, done events */
2438 main_mask = readl(hpriv->main_mask_reg_addr);
2439 main_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
2440 writelfl(main_mask, hpriv->main_mask_reg_addr);
2443 static void mv_eh_thaw(struct ata_port *ap)
2445 struct mv_host_priv *hpriv = ap->host->private_data;
2446 unsigned int shift, hardport, port = ap->port_no;
2447 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
2448 void __iomem *port_mmio = mv_ap_base(ap);
2449 u32 main_mask, hc_irq_cause;
2451 /* FIXME: handle coalescing completion events properly */
2453 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2455 /* clear EDMA errors on this port */
2456 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2458 /* clear pending irq events */
2459 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2460 hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
2461 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2463 /* enable assertion of portN err, done events */
2464 main_mask = readl(hpriv->main_mask_reg_addr);
2465 main_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
2466 writelfl(main_mask, hpriv->main_mask_reg_addr);
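/*
 * Illustrative sketch (not part of the driver build): mv_eh_freeze() and
 * mv_eh_thaw() differ only in whether the per-port "done" and "err" bits
 * are cleared from or set in the main IRQ mask.  The bit pair below is a
 * hypothetical stand-in for (DONE_IRQ | ERR_IRQ) at this port's shift:
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t port_irq_mask(uint32_t main_mask, unsigned int shift, bool enable)
{
	uint32_t bits = 0x3u << shift;		/* assumed done + err pair */

	return enable ? (main_mask | bits) : (main_mask & ~bits);
}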
2470 * mv_port_init - Perform some early initialization on a single port.
2471 * @port: libata data structure storing shadow register addresses
2472 * @port_mmio: base address of the port
2474 * Initialize shadow register mmio addresses, clear outstanding
2475 * interrupts on the port, and unmask interrupts for the future
2476 * start of the port.
2479 * Inherited from caller.
2481 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2483 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2486 /* PIO related setup
2488 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2490 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2491 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2492 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2493 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2494 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2495 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2497 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2498 /* special case: control/altstatus doesn't have ATA_REG_ address */
2499 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2502 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2504 /* Clear any currently outstanding port interrupt conditions */
2505 serr_ofs = mv_scr_offset(SCR_ERROR);
2506 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2507 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2509 /* unmask all non-transient EDMA error interrupts */
2510 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2512 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2513 readl(port_mmio + EDMA_CFG_OFS),
2514 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2515 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
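/*
 * Illustrative sketch (not part of the driver build): mv_port_init() above
 * lays each shadow taskfile register out in its own 32-bit word starting at
 * SHD_BLK_OFS.  The index values here are hypothetical but mirror the
 * classic taskfile order (data, error, nsect, lbal, lbam, lbah, device,
 * status):
 */
#include <stdint.h>
#include <stddef.h>

enum {
	REG_DATA, REG_ERR, REG_NSECT, REG_LBAL,
	REG_LBAM, REG_LBAH, REG_DEVICE, REG_STATUS,
};

static void *shadow_reg_addr(void *shd_base, unsigned int reg_index)
{
	return (uint8_t *)shd_base + sizeof(uint32_t) * reg_index;	/* one word each */
}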
2518 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2520 struct pci_dev *pdev = to_pci_dev(host->dev);
2521 struct mv_host_priv *hpriv = host->private_data;
2522 u32 hp_flags = hpriv->hp_flags;
2524 switch (board_idx) {
2526 hpriv->ops = &mv5xxx_ops;
2527 hp_flags |= MV_HP_GEN_I;
2529 switch (pdev->revision) {
2531 hp_flags |= MV_HP_ERRATA_50XXB0;
2534 hp_flags |= MV_HP_ERRATA_50XXB2;
2537 dev_printk(KERN_WARNING, &pdev->dev,
2538 "Applying 50XXB2 workarounds to unknown rev\n");
2539 hp_flags |= MV_HP_ERRATA_50XXB2;
2546 hpriv->ops = &mv5xxx_ops;
2547 hp_flags |= MV_HP_GEN_I;
2549 switch (pdev->revision) {
2551 hp_flags |= MV_HP_ERRATA_50XXB0;
2554 hp_flags |= MV_HP_ERRATA_50XXB2;
2557 dev_printk(KERN_WARNING, &pdev->dev,
2558 "Applying B2 workarounds to unknown rev\n");
2559 hp_flags |= MV_HP_ERRATA_50XXB2;
2566 hpriv->ops = &mv6xxx_ops;
2567 hp_flags |= MV_HP_GEN_II;
2569 switch (pdev->revision) {
2571 hp_flags |= MV_HP_ERRATA_60X1B2;
2574 hp_flags |= MV_HP_ERRATA_60X1C0;
2577 dev_printk(KERN_WARNING, &pdev->dev,
2578 "Applying B2 workarounds to unknown rev\n");
2579 hp_flags |= MV_HP_ERRATA_60X1B2;
2585 hp_flags |= MV_HP_PCIE;
2586 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2587 (pdev->device == 0x2300 || pdev->device == 0x2310))
2590 * Highpoint RocketRAID PCIe 23xx series cards:
2592 * Unconfigured drives are treated as "Legacy"
2593 * by the BIOS, and it overwrites sector 8 with
2594 * a "Lgcy" metadata block prior to Linux boot.
2596 * Configured drives (RAID or JBOD) leave sector 8
2597 * alone, but instead overwrite a high numbered
2598 * sector for the RAID metadata. This sector can
2599 * be determined exactly, by truncating the physical
2600 * drive capacity to a nice even GB value.
2602 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2604 * Warn the user, lest they think we're just buggy.
2606 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2607 " BIOS CORRUPTS DATA on all attached drives,"
2608 " regardless of if/how they are configured."
2610 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2611 " use sectors 8-9 on \"Legacy\" drives,"
2612 " and avoid the final two gigabytes on"
2613 " all RocketRAID BIOS initialized drives.\n");
2616 hpriv->ops = &mv6xxx_ops;
2617 hp_flags |= MV_HP_GEN_IIE;
2619 switch (pdev->revision) {
2621 hp_flags |= MV_HP_ERRATA_XX42A0;
2624 hp_flags |= MV_HP_ERRATA_60X1C0;
2627 dev_printk(KERN_WARNING, &pdev->dev,
2628 "Applying 60X1C0 workarounds to unknown rev\n");
2629 hp_flags |= MV_HP_ERRATA_60X1C0;
2634 hpriv->ops = &mv_soc_ops;
2635 hp_flags |= MV_HP_ERRATA_60X1C0;
2639 dev_printk(KERN_ERR, host->dev,
2640 "BUG: invalid board index %u\n", board_idx);
2644 hpriv->hp_flags = hp_flags;
2645 if (hp_flags & MV_HP_PCIE) {
2646 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2647 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2648 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2650 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2651 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2652 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2659 * mv_init_host - Perform some early initialization of the host.
2660 * @host: ATA host to initialize
2661 * @board_idx: controller index
2663 * If possible, do an early global reset of the host. Then do
2664 * our port init and clear/unmask all/relevant host interrupts.
2667 * Inherited from caller.
2669 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2671 int rc = 0, n_hc, port, hc;
2672 struct mv_host_priv *hpriv = host->private_data;
2673 void __iomem *mmio = hpriv->base;
2675 rc = mv_chip_id(host, board_idx);
2679 if (HAS_PCI(host)) {
2680 hpriv->main_cause_reg_addr = mmio + HC_MAIN_IRQ_CAUSE_OFS;
2681 hpriv->main_mask_reg_addr = mmio + HC_MAIN_IRQ_MASK_OFS;
2683 hpriv->main_cause_reg_addr = mmio + HC_SOC_MAIN_IRQ_CAUSE_OFS;
2684 hpriv->main_mask_reg_addr = mmio + HC_SOC_MAIN_IRQ_MASK_OFS;
2687 /* global interrupt mask: 0 == mask everything */
2688 writel(0, hpriv->main_mask_reg_addr);
2690 n_hc = mv_get_hc_count(host->ports[0]->flags);
2692 for (port = 0; port < host->n_ports; port++)
2693 hpriv->ops->read_preamp(hpriv, port, mmio);
2695 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2699 hpriv->ops->reset_flash(hpriv, mmio);
2700 hpriv->ops->reset_bus(host, mmio);
2701 hpriv->ops->enable_leds(hpriv, mmio);
2703 for (port = 0; port < host->n_ports; port++) {
2704 struct ata_port *ap = host->ports[port];
2705 void __iomem *port_mmio = mv_port_base(mmio, port);
2707 mv_port_init(&ap->ioaddr, port_mmio);
2710 if (HAS_PCI(host)) {
2711 unsigned int offset = port_mmio - mmio;
2712 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2713 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2718 for (hc = 0; hc < n_hc; hc++) {
2719 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2721 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2722 "(before clear)=0x%08x\n", hc,
2723 readl(hc_mmio + HC_CFG_OFS),
2724 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2726 /* Clear any currently outstanding hc interrupt conditions */
2727 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2730 if (HAS_PCI(host)) {
2731 /* Clear any currently outstanding host interrupt conditions */
2732 writelfl(0, mmio + hpriv->irq_cause_ofs);
2734 /* and unmask interrupt generation for host regs */
2735 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2736 if (IS_GEN_I(hpriv))
2737 writelfl(~HC_MAIN_MASKED_IRQS_5,
2738 hpriv->main_mask_reg_addr);
2740 writelfl(~HC_MAIN_MASKED_IRQS,
2741 hpriv->main_mask_reg_addr);
2743 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2744 "PCI int cause/mask=0x%08x/0x%08x\n",
2745 readl(hpriv->main_cause_reg_addr),
2746 readl(hpriv->main_mask_reg_addr),
2747 readl(mmio + hpriv->irq_cause_ofs),
2748 readl(mmio + hpriv->irq_mask_ofs));
2750 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2751 hpriv->main_mask_reg_addr);
2752 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2753 readl(hpriv->main_cause_reg_addr),
2754 readl(hpriv->main_mask_reg_addr));
2760 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2762 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2764 if (!hpriv->crqb_pool)
2767 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2769 if (!hpriv->crpb_pool)
2772 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2774 if (!hpriv->sg_tbl_pool)
2780 static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
2781 struct mbus_dram_target_info *dram)
2785 for (i = 0; i < 4; i++) {
2786 writel(0, hpriv->base + WINDOW_CTRL(i));
2787 writel(0, hpriv->base + WINDOW_BASE(i));
2790 for (i = 0; i < dram->num_cs; i++) {
2791 struct mbus_dram_window *cs = dram->cs + i;
2793 writel(((cs->size - 1) & 0xffff0000) |
2794 (cs->mbus_attr << 8) |
2795 (dram->mbus_dram_target_id << 4) | 1,
2796 hpriv->base + WINDOW_CTRL(i));
2797 writel(cs->base, hpriv->base + WINDOW_BASE(i));
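/*
 * Illustrative sketch (not part of the driver build): each enabled MBUS
 * window control word above packs the window size (64 KB granularity), the
 * target attribute, the DRAM target id and an enable bit into one register.
 * The helper assumes a power-of-two window size in bytes:
 */
#include <stdint.h>

static uint32_t mbus_window_ctrl(uint32_t size, uint8_t attr, uint8_t target)
{
	return ((size - 1) & 0xffff0000) |	/* size field */
	       ((uint32_t)attr << 8) |		/* target attribute */
	       ((uint32_t)target << 4) |	/* DRAM target id */
	       1;				/* window enable */
}
/* e.g. an assumed 256 MB chip select: mbus_window_ctrl(0x10000000, attr, target) */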
2802 * mv_platform_probe - handle a positive probe of an SoC Marvell
2804 * @pdev: platform device found
2807 * Inherited from caller.
2809 static int mv_platform_probe(struct platform_device *pdev)
2811 static int printed_version;
2812 const struct mv_sata_platform_data *mv_platform_data;
2813 const struct ata_port_info *ppi[] =
2814 { &mv_port_info[chip_soc], NULL };
2815 struct ata_host *host;
2816 struct mv_host_priv *hpriv;
2817 struct resource *res;
2820 if (!printed_version++)
2821 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2824 * Simple resource validation ..
2826 if (unlikely(pdev->num_resources != 2)) {
2827 dev_err(&pdev->dev, "invalid number of resources\n");
2832 * Get the register base first
2834 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2839 mv_platform_data = pdev->dev.platform_data;
2840 n_ports = mv_platform_data->n_ports;
2842 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2843 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2845 if (!host || !hpriv)
2847 host->private_data = hpriv;
2848 hpriv->n_ports = n_ports;
2851 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2852 res->end - res->start + 1);
2853 hpriv->base -= MV_SATAHC0_REG_BASE;
2856 * (Re-)program MBUS remapping windows if we are asked to.
2858 if (mv_platform_data->dram != NULL)
2859 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
2861 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2865 /* initialize adapter */
2866 rc = mv_init_host(host, chip_soc);
2870 dev_printk(KERN_INFO, &pdev->dev,
2871 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2874 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2875 IRQF_SHARED, &mv6_sht);
2880 * mv_platform_remove - unplug a platform interface
2881 * @pdev: platform device
2883 * A platform bus SATA device has been unplugged. Perform the needed
2884 * cleanup. Also called on module unload for any active devices.
2886 static int __devexit mv_platform_remove(struct platform_device *pdev)
2888 struct device *dev = &pdev->dev;
2889 struct ata_host *host = dev_get_drvdata(dev);
2891 ata_host_detach(host);
2895 static struct platform_driver mv_platform_driver = {
2896 .probe = mv_platform_probe,
2897 .remove = __devexit_p(mv_platform_remove),
2900 .owner = THIS_MODULE,
2906 static int mv_pci_init_one(struct pci_dev *pdev,
2907 const struct pci_device_id *ent);
2910 static struct pci_driver mv_pci_driver = {
2912 .id_table = mv_pci_tbl,
2913 .probe = mv_pci_init_one,
2914 .remove = ata_pci_remove_one,
2920 static int msi; /* Use PCI MSI; either zero (off, default) or non-zero */
2923 /* move to PCI layer or libata core? */
2924 static int pci_go_64(struct pci_dev *pdev)
2928 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2929 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2931 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2933 dev_printk(KERN_ERR, &pdev->dev,
2934 "64-bit DMA enable failed\n");
2939 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2941 dev_printk(KERN_ERR, &pdev->dev,
2942 "32-bit DMA enable failed\n");
2945 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2947 dev_printk(KERN_ERR, &pdev->dev,
2948 "32-bit consistent DMA enable failed\n");
2957 * mv_print_info - Dump key info to kernel log for perusal.
2958 * @host: ATA host to print info about
2960 * FIXME: complete this.
2963 * Inherited from caller.
2965 static void mv_print_info(struct ata_host *host)
2967 struct pci_dev *pdev = to_pci_dev(host->dev);
2968 struct mv_host_priv *hpriv = host->private_data;
2970 const char *scc_s, *gen;
2972 /* Use this to determine the HW stepping of the chip so we know
2973 * which errata to work around
2975 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2978 else if (scc == 0x01)
2983 if (IS_GEN_I(hpriv))
2985 else if (IS_GEN_II(hpriv))
2987 else if (IS_GEN_IIE(hpriv))
2992 dev_printk(KERN_INFO, &pdev->dev,
2993 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2994 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2995 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2999 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
3000 * @pdev: PCI device found
3001 * @ent: PCI device ID entry for the matched host
3004 * Inherited from caller.
3006 static int mv_pci_init_one(struct pci_dev *pdev,
3007 const struct pci_device_id *ent)
3009 static int printed_version;
3010 unsigned int board_idx = (unsigned int)ent->driver_data;
3011 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3012 struct ata_host *host;
3013 struct mv_host_priv *hpriv;
3016 if (!printed_version++)
3017 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3020 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3022 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3023 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3024 if (!host || !hpriv)
3026 host->private_data = hpriv;
3027 hpriv->n_ports = n_ports;
3029 /* acquire resources */
3030 rc = pcim_enable_device(pdev);
3034 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3036 pcim_pin_device(pdev);
3039 host->iomap = pcim_iomap_table(pdev);
3040 hpriv->base = host->iomap[MV_PRIMARY_BAR];
3042 rc = pci_go_64(pdev);
3046 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3050 /* initialize adapter */
3051 rc = mv_init_host(host, board_idx);
3055 /* Enable interrupts */
3056 if (msi && pci_enable_msi(pdev))
3059 mv_dump_pci_cfg(pdev, 0x68);
3060 mv_print_info(host);
3062 pci_set_master(pdev);
3063 pci_try_set_mwi(pdev);
3064 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3065 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
3069 static int mv_platform_probe(struct platform_device *pdev);
3070 static int __devexit mv_platform_remove(struct platform_device *pdev);
3072 static int __init mv_init(void)
3076 rc = pci_register_driver(&mv_pci_driver);
3080 rc = platform_driver_register(&mv_platform_driver);
3084 pci_unregister_driver(&mv_pci_driver);
3089 static void __exit mv_exit(void)
3092 pci_unregister_driver(&mv_pci_driver);
3094 platform_driver_unregister(&mv_platform_driver);
3097 MODULE_AUTHOR("Brett Russ");
3098 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3099 MODULE_LICENSE("GPL");
3100 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3101 MODULE_VERSION(DRV_VERSION);
3102 MODULE_ALIAS("platform:" DRV_NAME);
3105 module_param(msi, int, 0444);
3106 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
3109 module_init(mv_init);
3110 module_exit(mv_exit);