2 * sata_mv.c - Marvell SATA support
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X) are still needed.
32 2) Improve/fix IRQ and error handling sequences.
34 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36 4) Think about TCQ support here, and for libata in general
37 with controllers that support it via host-queuing hardware
38 (a software-only implementation could be a nightmare).
40 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42 6) Add port multiplier support (intermediate)
44 8) Develop a low-power-consumption strategy, and implement it.
46 9) [Experiment, low priority] See if ATAPI can be supported using
47 "unknown FIS" or "vendor-specific FIS" support, or something creative
50 10) [Experiment, low priority] Investigate interrupt coalescing.
51 Quite often, especially with PCI Message Signalled Interrupts (MSI),
52 the overhead reduced by interrupt mitigation is not
53 worth the latency cost.
55 11) [Experiment, Marvell value added] Is it possible to use target
56 mode to cross-connect two Linux boxes with Marvell cards? If so,
57 creating LibATA target mode support would be very interesting.
59 Target mode, for those without docs, is the ability to directly
60 connect two SATA controllers.
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/pci.h>
68 #include <linux/init.h>
69 #include <linux/blkdev.h>
70 #include <linux/delay.h>
71 #include <linux/interrupt.h>
72 #include <linux/dma-mapping.h>
73 #include <linux/device.h>
74 #include <scsi/scsi_host.h>
75 #include <scsi/scsi_cmnd.h>
76 #include <scsi/scsi_device.h>
77 #include <linux/libata.h>
79 #define DRV_NAME "sata_mv"
80 #define DRV_VERSION "1.20"
83 /* BARs are enumerated in pci_resource_start() terms */
84 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
85 MV_IO_BAR = 2, /* offset 0x18: IO space */
86 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
88 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
89 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
92 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
93 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
94 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
95 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
96 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
97 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
99 MV_SATAHC0_REG_BASE = 0x20000,
100 MV_FLASH_CTL = 0x1046c,
101 MV_GPIO_PORT_CTL = 0x104f0,
102 MV_RESET_CFG = 0x180d8,
104 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
105 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
106 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
107 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
110 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
112 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
113 * CRPB needs alignment on a 256B boundary. Size == 256B
114 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
116 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
117 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
119 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
122 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
123 MV_PORT_HC_SHIFT = 2,
124 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
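	/* Example added for illustration (not in the original comments):
	 * with MV_PORT_HC_SHIFT == 2, global port 5 belongs to
	 * HC (5 >> 2) == 1 and is hard port (5 % MV_PORTS_PER_HC) == 1
	 * within that HC; see mv_hc_from_port() / mv_hardport_from_port().
	 */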
128 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
129 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
130 /* SoC integrated controllers, no PCI interface */
131 MV_FLAG_SOC = (1 << 28),
133 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
134 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
135 ATA_FLAG_PIO_POLLING,
136 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
138 CRQB_FLAG_READ = (1 << 0),
140 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
141 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
142 CRQB_CMD_ADDR_SHIFT = 8,
143 CRQB_CMD_CS = (0x2 << 11),
144 CRQB_CMD_LAST = (1 << 15),
146 CRPB_FLAG_STATUS_SHIFT = 8,
147 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
148 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
150 EPRD_FLAG_END_OF_TBL = (1 << 31),
152 /* PCI interface registers */
154 PCI_COMMAND_OFS = 0xc00,
156 PCI_MAIN_CMD_STS_OFS = 0xd30,
157 STOP_PCI_MASTER = (1 << 2),
158 PCI_MASTER_EMPTY = (1 << 3),
159 GLOB_SFT_RST = (1 << 4),
162 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
163 MV_PCI_DISC_TIMER = 0xd04,
164 MV_PCI_MSI_TRIGGER = 0xc38,
165 MV_PCI_SERR_MASK = 0xc28,
166 MV_PCI_XBAR_TMOUT = 0x1d04,
167 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
168 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
169 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
170 MV_PCI_ERR_COMMAND = 0x1d50,
172 PCI_IRQ_CAUSE_OFS = 0x1d58,
173 PCI_IRQ_MASK_OFS = 0x1d5c,
174 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
176 PCIE_IRQ_CAUSE_OFS = 0x1900,
177 PCIE_IRQ_MASK_OFS = 0x1910,
178 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
180 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
181 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
182 PORT0_ERR = (1 << 0), /* shift by port # */
183 PORT0_DONE = (1 << 1), /* shift by port # */
184 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
185 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
187 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
188 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
189 PORTS_0_3_COAL_DONE = (1 << 8),
190 PORTS_4_7_COAL_DONE = (1 << 17),
191 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
192 GPIO_INT = (1 << 22),
193 SELF_INT = (1 << 23),
194 TWSI_INT = (1 << 24),
195 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
196 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
197 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
198 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
200 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
203 /* SATAHC registers */
206 HC_IRQ_CAUSE_OFS = 0x14,
207 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
208 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
209 DEV_IRQ = (1 << 8), /* shift by port # */
211 /* Shadow block registers */
213 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
216 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
217 SATA_ACTIVE_OFS = 0x350,
218 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
225 SATA_INTERFACE_CTL = 0x050,
227 MV_M2_PREAMP_MASK = 0x7e0,
231 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
232 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
233 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
234 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
235 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
237 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
238 EDMA_ERR_IRQ_MASK_OFS = 0xc,
239 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
240 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
241 EDMA_ERR_DEV = (1 << 2), /* device error */
242 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
243 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
244 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
245 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
246 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
247 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
248 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
249 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
250 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
251 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
252 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
254 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
255 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
256 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
257 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
258 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
260 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
262 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
263 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
264 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
265 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
266 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
267 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
269 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
271 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
272 EDMA_ERR_OVERRUN_5 = (1 << 5),
273 EDMA_ERR_UNDERRUN_5 = (1 << 6),
275 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
276 EDMA_ERR_LNK_CTRL_RX_1 |
277 EDMA_ERR_LNK_CTRL_RX_3 |
278 EDMA_ERR_LNK_CTRL_TX,
280 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
290 EDMA_ERR_LNK_CTRL_RX_2 |
291 EDMA_ERR_LNK_DATA_RX |
292 EDMA_ERR_LNK_DATA_TX |
293 EDMA_ERR_TRANS_PROTO,
294 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
299 EDMA_ERR_UNDERRUN_5 |
300 EDMA_ERR_SELF_DIS_5 |
306 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
307 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
309 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
310 EDMA_REQ_Q_PTR_SHIFT = 5,
312 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
313 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
314 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
315 EDMA_RSP_Q_PTR_SHIFT = 3,
317 EDMA_CMD_OFS = 0x28, /* EDMA command register */
318 EDMA_EN = (1 << 0), /* enable EDMA */
319 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
320 ATA_RST = (1 << 2), /* reset trans/link/phy */
322 EDMA_IORDY_TMOUT = 0x34,
325 /* Host private flags (hp_flags) */
326 MV_HP_FLAG_MSI = (1 << 0),
327 MV_HP_ERRATA_50XXB0 = (1 << 1),
328 MV_HP_ERRATA_50XXB2 = (1 << 2),
329 MV_HP_ERRATA_60X1B2 = (1 << 3),
330 MV_HP_ERRATA_60X1C0 = (1 << 4),
331 MV_HP_ERRATA_XX42A0 = (1 << 5),
332 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
333 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
334 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
335 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
337 /* Port private flags (pp_flags) */
338 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
339 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
340 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
343 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
344 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
345 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
346 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
349 /* DMA boundary 0xffff is required by the s/g splitting
350 * we need on /length/ in mv_fill_sg().
352 MV_DMA_BOUNDARY = 0xffffU,
354 /* mask of register bits containing lower 32 bits
355 * of EDMA request queue DMA address
357 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
359 /* ditto, for response queue */
360 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
373 /* Command ReQuest Block: 32B */
389 /* Command ResPonse Block: 8B */
396 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
404 struct mv_port_priv {
405 struct mv_crqb *crqb;
407 struct mv_crpb *crpb;
409 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
410 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
412 unsigned int req_idx;
413 unsigned int resp_idx;
418 struct mv_port_signal {
423 struct mv_host_priv {
425 struct mv_port_signal signal[8];
426 const struct mv_hw_ops *ops;
431 * These consistent DMA memory pools give us guaranteed
432 * alignment for hardware-accessed data structures,
433 * and less memory waste in accomplishing the alignment.
435 struct dma_pool *crqb_pool;
436 struct dma_pool *crpb_pool;
437 struct dma_pool *sg_tbl_pool;
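	/* Assumed sketch (pool creation is not shown in this excerpt): the
	 * pools are presumably created at host init with size == alignment,
	 * which is what guarantees the alignment rules noted above, e.g.:
	 *
	 *	hpriv->crqb_pool = dma_pool_create("crqb_q", dev,
	 *					   MV_CRQB_Q_SZ, MV_CRQB_Q_SZ, 0);
	 *	hpriv->crpb_pool = dma_pool_create("crpb_q", dev,
	 *					   MV_CRPB_Q_SZ, MV_CRPB_Q_SZ, 0);
	 *
	 * The pool names used here are placeholders; "dev" is the host's
	 * struct device.
	 */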
441 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
443 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
444 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
446 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
448 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
449 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
452 static void mv_irq_clear(struct ata_port *ap);
453 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
454 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
455 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
456 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
457 static int mv_port_start(struct ata_port *ap);
458 static void mv_port_stop(struct ata_port *ap);
459 static void mv_qc_prep(struct ata_queued_cmd *qc);
460 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
461 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
462 static void mv_error_handler(struct ata_port *ap);
463 static void mv_eh_freeze(struct ata_port *ap);
464 static void mv_eh_thaw(struct ata_port *ap);
465 static void mv6_dev_config(struct ata_device *dev);
467 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
469 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
470 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
472 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
474 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
475 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
477 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
479 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
480 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
482 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
484 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
485 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
486 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
487 unsigned int port_no);
488 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
489 void __iomem *port_mmio, int want_ncq);
490 static int __mv_stop_dma(struct ata_port *ap);
492 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
493 * because we have to allow room for worst case splitting of
494 * PRDs for 64K boundaries in mv_fill_sg().
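/* Worked example (added for clarity): the ePRD length field holds only
 * 16 bits, so mv_fill_sg() below splits any S/G segment that straddles a
 * 64K address boundary into two ePRDs.  With segments limited to 64K by
 * .dma_boundary, each of the MV_MAX_SG_CT / 2 advertised entries expands
 * to at most two ePRDs, which still fits the MV_MAX_SG_CT-entry table.
 */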
496 static struct scsi_host_template mv5_sht = {
497 .module = THIS_MODULE,
499 .ioctl = ata_scsi_ioctl,
500 .queuecommand = ata_scsi_queuecmd,
501 .can_queue = ATA_DEF_QUEUE,
502 .this_id = ATA_SHT_THIS_ID,
503 .sg_tablesize = MV_MAX_SG_CT / 2,
504 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
505 .emulated = ATA_SHT_EMULATED,
507 .proc_name = DRV_NAME,
508 .dma_boundary = MV_DMA_BOUNDARY,
509 .slave_configure = ata_scsi_slave_config,
510 .slave_destroy = ata_scsi_slave_destroy,
511 .bios_param = ata_std_bios_param,
514 static struct scsi_host_template mv6_sht = {
515 .module = THIS_MODULE,
517 .ioctl = ata_scsi_ioctl,
518 .queuecommand = ata_scsi_queuecmd,
519 .change_queue_depth = ata_scsi_change_queue_depth,
520 .can_queue = MV_MAX_Q_DEPTH - 1,
521 .this_id = ATA_SHT_THIS_ID,
522 .sg_tablesize = MV_MAX_SG_CT / 2,
523 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
524 .emulated = ATA_SHT_EMULATED,
526 .proc_name = DRV_NAME,
527 .dma_boundary = MV_DMA_BOUNDARY,
528 .slave_configure = ata_scsi_slave_config,
529 .slave_destroy = ata_scsi_slave_destroy,
530 .bios_param = ata_std_bios_param,
533 static const struct ata_port_operations mv5_ops = {
534 .tf_load = ata_tf_load,
535 .tf_read = ata_tf_read,
536 .check_status = ata_check_status,
537 .exec_command = ata_exec_command,
538 .dev_select = ata_std_dev_select,
540 .cable_detect = ata_cable_sata,
542 .qc_prep = mv_qc_prep,
543 .qc_issue = mv_qc_issue,
544 .data_xfer = ata_data_xfer,
546 .irq_clear = mv_irq_clear,
547 .irq_on = ata_irq_on,
549 .error_handler = mv_error_handler,
550 .freeze = mv_eh_freeze,
553 .scr_read = mv5_scr_read,
554 .scr_write = mv5_scr_write,
556 .port_start = mv_port_start,
557 .port_stop = mv_port_stop,
560 static const struct ata_port_operations mv6_ops = {
561 .dev_config = mv6_dev_config,
562 .tf_load = ata_tf_load,
563 .tf_read = ata_tf_read,
564 .check_status = ata_check_status,
565 .exec_command = ata_exec_command,
566 .dev_select = ata_std_dev_select,
568 .cable_detect = ata_cable_sata,
570 .qc_prep = mv_qc_prep,
571 .qc_issue = mv_qc_issue,
572 .data_xfer = ata_data_xfer,
574 .irq_clear = mv_irq_clear,
575 .irq_on = ata_irq_on,
577 .error_handler = mv_error_handler,
578 .freeze = mv_eh_freeze,
580 .qc_defer = ata_std_qc_defer,
582 .scr_read = mv_scr_read,
583 .scr_write = mv_scr_write,
585 .port_start = mv_port_start,
586 .port_stop = mv_port_stop,
589 static const struct ata_port_operations mv_iie_ops = {
590 .tf_load = ata_tf_load,
591 .tf_read = ata_tf_read,
592 .check_status = ata_check_status,
593 .exec_command = ata_exec_command,
594 .dev_select = ata_std_dev_select,
596 .cable_detect = ata_cable_sata,
598 .qc_prep = mv_qc_prep_iie,
599 .qc_issue = mv_qc_issue,
600 .data_xfer = ata_data_xfer,
602 .irq_clear = mv_irq_clear,
603 .irq_on = ata_irq_on,
605 .error_handler = mv_error_handler,
606 .freeze = mv_eh_freeze,
608 .qc_defer = ata_std_qc_defer,
610 .scr_read = mv_scr_read,
611 .scr_write = mv_scr_write,
613 .port_start = mv_port_start,
614 .port_stop = mv_port_stop,
617 static const struct ata_port_info mv_port_info[] = {
619 .flags = MV_COMMON_FLAGS,
620 .pio_mask = 0x1f, /* pio0-4 */
621 .udma_mask = ATA_UDMA6,
622 .port_ops = &mv5_ops,
625 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
626 .pio_mask = 0x1f, /* pio0-4 */
627 .udma_mask = ATA_UDMA6,
628 .port_ops = &mv5_ops,
631 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
632 .pio_mask = 0x1f, /* pio0-4 */
633 .udma_mask = ATA_UDMA6,
634 .port_ops = &mv5_ops,
637 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
639 .pio_mask = 0x1f, /* pio0-4 */
640 .udma_mask = ATA_UDMA6,
641 .port_ops = &mv6_ops,
644 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
645 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
646 .pio_mask = 0x1f, /* pio0-4 */
647 .udma_mask = ATA_UDMA6,
648 .port_ops = &mv6_ops,
651 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
653 .pio_mask = 0x1f, /* pio0-4 */
654 .udma_mask = ATA_UDMA6,
655 .port_ops = &mv_iie_ops,
658 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
660 .pio_mask = 0x1f, /* pio0-4 */
661 .udma_mask = ATA_UDMA6,
662 .port_ops = &mv_iie_ops,
666 static const struct pci_device_id mv_pci_tbl[] = {
667 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
668 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
669 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
670 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
671 /* RocketRAID 1740/174x have different identifiers */
672 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
673 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
675 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
676 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
677 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
678 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
679 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
681 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
684 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
686 /* Marvell 7042 support */
687 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
689 /* Highpoint RocketRAID PCIe series */
690 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
691 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
693 { } /* terminate list */
696 static const struct mv_hw_ops mv5xxx_ops = {
697 .phy_errata = mv5_phy_errata,
698 .enable_leds = mv5_enable_leds,
699 .read_preamp = mv5_read_preamp,
700 .reset_hc = mv5_reset_hc,
701 .reset_flash = mv5_reset_flash,
702 .reset_bus = mv5_reset_bus,
705 static const struct mv_hw_ops mv6xxx_ops = {
706 .phy_errata = mv6_phy_errata,
707 .enable_leds = mv6_enable_leds,
708 .read_preamp = mv6_read_preamp,
709 .reset_hc = mv6_reset_hc,
710 .reset_flash = mv6_reset_flash,
711 .reset_bus = mv_reset_pci_bus,
718 static inline void writelfl(unsigned long data, void __iomem *addr)
721 (void) readl(addr); /* flush to avoid PCI posted write */
724 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
726 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
729 static inline unsigned int mv_hc_from_port(unsigned int port)
731 return port >> MV_PORT_HC_SHIFT;
734 static inline unsigned int mv_hardport_from_port(unsigned int port)
736 return port & MV_PORT_MASK;
739 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
742 return mv_hc_base(base, mv_hc_from_port(port));
745 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
747 return mv_hc_base_from_port(base, port) +
748 MV_SATAHC_ARBTR_REG_SZ +
749 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
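/* Example of the resulting register layout (added for illustration):
 * with the constants above, port 5 is hard port 1 of HC 1, so its port
 * registers start at
 *
 *	0x20000 + 1 * 0x10000 + 0x2000 + 1 * 0x2000 == 0x34000
 *
 * from the chip's main register BAR.
 */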
752 static inline void __iomem *mv_ap_base(struct ata_port *ap)
754 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
757 static inline int mv_get_hc_count(unsigned long port_flags)
759 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
762 static void mv_irq_clear(struct ata_port *ap)
766 static void mv_set_edma_ptrs(void __iomem *port_mmio,
767 struct mv_host_priv *hpriv,
768 struct mv_port_priv *pp)
773 * initialize request queue
775 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
777 WARN_ON(pp->crqb_dma & 0x3ff);
778 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
779 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
780 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
782 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
783 writelfl((pp->crqb_dma & 0xffffffff) | index,
784 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
786 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
789 * initialize response queue
791 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
793 WARN_ON(pp->crpb_dma & 0xff);
794 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
796 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
797 writelfl((pp->crpb_dma & 0xffffffff) | index,
798 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
800 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
802 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
803 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
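/* Illustrative example (added, not from the original): these pointer
 * registers pack the queue base address together with the ring index.
 * With, say, crqb_dma == 0x1f400000 and req_idx == 5, the request
 * in-pointer above is written as
 *
 *	(0x1f400000 & EDMA_REQ_Q_BASE_LO_MASK) | (5 << EDMA_REQ_Q_PTR_SHIFT)
 *	== 0x1f400000 | 0xa0 == 0x1f4000a0
 *
 * The 1KB-aligned CRQB base leaves the low 10 bits free, and the 32-entry
 * index occupies bits 9:5 of them.
 */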
807 * mv_start_dma - Enable eDMA engine
808 * @port_mmio: port base address
809 * @pp: port private data
811 * Verify the local cache of the eDMA state is accurate with a WARN_ON.
815 * Inherited from caller.
817 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
818 struct mv_port_priv *pp, u8 protocol)
820 int want_ncq = (protocol == ATA_PROT_NCQ);
822 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
823 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
824 if (want_ncq != using_ncq)
827 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
828 struct mv_host_priv *hpriv = ap->host->private_data;
829 int hard_port = mv_hardport_from_port(ap->port_no);
830 void __iomem *hc_mmio = mv_hc_base_from_port(
831 ap->host->iomap[MV_PRIMARY_BAR], hard_port);
832 u32 hc_irq_cause, ipending;
834 /* clear EDMA event indicators, if any */
835 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
837 /* clear EDMA interrupt indicator, if any */
838 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
839 ipending = (DEV_IRQ << hard_port) |
840 (CRPB_DMA_DONE << hard_port);
841 if (hc_irq_cause & ipending) {
842 writelfl(hc_irq_cause & ~ipending,
843 hc_mmio + HC_IRQ_CAUSE_OFS);
846 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
848 /* clear FIS IRQ Cause */
849 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
851 mv_set_edma_ptrs(port_mmio, hpriv, pp);
853 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
854 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
856 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
860 * __mv_stop_dma - Disable eDMA engine
861 * @ap: ATA channel to manipulate
863 * Verify the local cache of the eDMA state is accurate with a WARN_ON.
867 * Inherited from caller.
869 static int __mv_stop_dma(struct ata_port *ap)
871 void __iomem *port_mmio = mv_ap_base(ap);
872 struct mv_port_priv *pp = ap->private_data;
876 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
877 /* Disable EDMA if active. The disable bit auto clears.
879 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
880 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
882 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
885 /* now properly wait for the eDMA to stop */
886 for (i = 1000; i > 0; i--) {
887 reg = readl(port_mmio + EDMA_CMD_OFS);
888 if (!(reg & EDMA_EN))
895 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
902 static int mv_stop_dma(struct ata_port *ap)
907 spin_lock_irqsave(&ap->host->lock, flags);
908 rc = __mv_stop_dma(ap);
909 spin_unlock_irqrestore(&ap->host->lock, flags);
915 static void mv_dump_mem(void __iomem *start, unsigned bytes)
918 for (b = 0; b < bytes; ) {
919 DPRINTK("%p: ", start + b);
920 for (w = 0; b < bytes && w < 4; w++) {
921 printk("%08x ", readl(start + b));
929 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
934 for (b = 0; b < bytes; ) {
935 DPRINTK("%02x: ", b);
936 for (w = 0; b < bytes && w < 4; w++) {
937 (void) pci_read_config_dword(pdev, b, &dw);
945 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
946 struct pci_dev *pdev)
949 void __iomem *hc_base = mv_hc_base(mmio_base,
950 port >> MV_PORT_HC_SHIFT);
951 void __iomem *port_base;
952 int start_port, num_ports, p, start_hc, num_hcs, hc;
955 start_hc = start_port = 0;
956 num_ports = 8; /* should be benign for 4-port devices */
959 start_hc = port >> MV_PORT_HC_SHIFT;
961 num_ports = num_hcs = 1;
963 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
964 num_ports > 1 ? num_ports - 1 : start_port);
967 DPRINTK("PCI config space regs:\n");
968 mv_dump_pci_cfg(pdev, 0x68);
970 DPRINTK("PCI regs:\n");
971 mv_dump_mem(mmio_base+0xc00, 0x3c);
972 mv_dump_mem(mmio_base+0xd00, 0x34);
973 mv_dump_mem(mmio_base+0xf00, 0x4);
974 mv_dump_mem(mmio_base+0x1d00, 0x6c);
975 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
976 hc_base = mv_hc_base(mmio_base, hc);
977 DPRINTK("HC regs (HC %i):\n", hc);
978 mv_dump_mem(hc_base, 0x1c);
980 for (p = start_port; p < start_port + num_ports; p++) {
981 port_base = mv_port_base(mmio_base, p);
982 DPRINTK("EDMA regs (port %i):\n", p);
983 mv_dump_mem(port_base, 0x54);
984 DPRINTK("SATA regs (port %i):\n", p);
985 mv_dump_mem(port_base+0x300, 0x60);
990 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
998 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1001 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1010 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1012 unsigned int ofs = mv_scr_offset(sc_reg_in);
1014 if (ofs != 0xffffffffU) {
1015 *val = readl(mv_ap_base(ap) + ofs);
1021 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1023 unsigned int ofs = mv_scr_offset(sc_reg_in);
1025 if (ofs != 0xffffffffU) {
1026 writelfl(val, mv_ap_base(ap) + ofs);
1032 static void mv6_dev_config(struct ata_device *adev)
1035 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1036 * See mv_qc_prep() for more info.
1038 if (adev->flags & ATA_DFLAG_NCQ)
1039 if (adev->max_sectors > ATA_MAX_SECTORS)
1040 adev->max_sectors = ATA_MAX_SECTORS;
1043 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1044 void __iomem *port_mmio, int want_ncq)
1048 /* set up non-NCQ EDMA configuration */
1049 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1051 if (IS_GEN_I(hpriv))
1052 cfg |= (1 << 8); /* enab config burst size mask */
1054 else if (IS_GEN_II(hpriv))
1055 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1057 else if (IS_GEN_IIE(hpriv)) {
1058 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1059 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1060 cfg |= (1 << 18); /* enab early completion */
1061 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1065 cfg |= EDMA_CFG_NCQ;
1066 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1068 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1070 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1073 static void mv_port_free_dma_mem(struct ata_port *ap)
1075 struct mv_host_priv *hpriv = ap->host->private_data;
1076 struct mv_port_priv *pp = ap->private_data;
1080 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1084 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1088 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1089 * For later hardware, we have one unique sg_tbl per NCQ tag.
1091 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1092 if (pp->sg_tbl[tag]) {
1093 if (tag == 0 || !IS_GEN_I(hpriv))
1094 dma_pool_free(hpriv->sg_tbl_pool,
1096 pp->sg_tbl_dma[tag]);
1097 pp->sg_tbl[tag] = NULL;
1103 * mv_port_start - Port specific init/start routine.
1104 * @ap: ATA channel to manipulate
1106 * Allocate and point to DMA memory, init port private memory,
1110 * Inherited from caller.
1112 static int mv_port_start(struct ata_port *ap)
1114 struct device *dev = ap->host->dev;
1115 struct mv_host_priv *hpriv = ap->host->private_data;
1116 struct mv_port_priv *pp;
1117 void __iomem *port_mmio = mv_ap_base(ap);
1118 unsigned long flags;
1121 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1124 ap->private_data = pp;
1126 rc = ata_pad_alloc(ap, dev);
1130 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1133 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1135 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1137 goto out_port_free_dma_mem;
1138 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1141 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1142 * For later hardware, we need one unique sg_tbl per NCQ tag.
1144 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1145 if (tag == 0 || !IS_GEN_I(hpriv)) {
1146 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1147 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1148 if (!pp->sg_tbl[tag])
1149 goto out_port_free_dma_mem;
1151 pp->sg_tbl[tag] = pp->sg_tbl[0];
1152 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1156 spin_lock_irqsave(&ap->host->lock, flags);
1158 mv_edma_cfg(pp, hpriv, port_mmio, 0);
1159 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1161 spin_unlock_irqrestore(&ap->host->lock, flags);
1163 /* Don't turn on EDMA here...do it before DMA commands only. Else
1164 * we'll be unable to send non-data, PIO, etc. commands due to restricted access to the shadow registers.
1169 out_port_free_dma_mem:
1170 mv_port_free_dma_mem(ap);
1175 * mv_port_stop - Port specific cleanup/stop routine.
1176 * @ap: ATA channel to manipulate
1178 * Stop DMA, cleanup port memory.
1181 * This routine uses the host lock to protect the DMA stop.
1183 static void mv_port_stop(struct ata_port *ap)
1186 mv_port_free_dma_mem(ap);
1190 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1191 * @qc: queued command whose SG list to source from
1193 * Populate the SG list and mark the last entry.
1196 * Inherited from caller.
1198 static void mv_fill_sg(struct ata_queued_cmd *qc)
1200 struct mv_port_priv *pp = qc->ap->private_data;
1201 struct scatterlist *sg;
1202 struct mv_sg *mv_sg, *last_sg = NULL;
1205 mv_sg = pp->sg_tbl[qc->tag];
1206 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1207 dma_addr_t addr = sg_dma_address(sg);
1208 u32 sg_len = sg_dma_len(sg);
1211 u32 offset = addr & 0xffff;
1214 if ((offset + sg_len > 0x10000))
1215 len = 0x10000 - offset;
1217 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1218 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1219 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1229 if (likely(last_sg))
1230 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
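/* Worked example of the split above (added for clarity): a hypothetical
 * segment at bus address 0x1234f000 with length 0x2000 straddles a 64K
 * boundary and is emitted as two ePRDs:
 *
 *	0x1234f000, len 0x1000	(up to the 0x12350000 boundary)
 *	0x12350000, len 0x1000	(the remainder)
 */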
1233 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1235 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1236 (last ? CRQB_CMD_LAST : 0);
1237 *cmdw = cpu_to_le16(tmp);
1241 * mv_qc_prep - Host specific command preparation.
1242 * @qc: queued command to prepare
1244 * This routine simply redirects to the general purpose routine
1245 * if command is not DMA. Else, it handles prep of the CRQB
1246 * (command request block), does some sanity checking, and calls
1247 * the SG load routine.
1250 * Inherited from caller.
1252 static void mv_qc_prep(struct ata_queued_cmd *qc)
1254 struct ata_port *ap = qc->ap;
1255 struct mv_port_priv *pp = ap->private_data;
1257 struct ata_taskfile *tf;
1261 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1262 (qc->tf.protocol != ATA_PROT_NCQ))
1265 /* Fill in command request block
1267 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1268 flags |= CRQB_FLAG_READ;
1269 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1270 flags |= qc->tag << CRQB_TAG_SHIFT;
1272 /* get current queue index from software */
1273 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1275 pp->crqb[in_index].sg_addr =
1276 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1277 pp->crqb[in_index].sg_addr_hi =
1278 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1279 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1281 cw = &pp->crqb[in_index].ata_cmd[0];
1284 /* Sadly, the CRQB cannot accommodate all registers--there are
1285 * only 11 bytes...so we must pick and choose required
1286 * registers based on the command. So, we drop feature and
1287 * hob_feature for [RW] DMA commands, but they are needed for
1288 * NCQ. NCQ will drop hob_nsect.
1290 switch (tf->command) {
1292 case ATA_CMD_READ_EXT:
1294 case ATA_CMD_WRITE_EXT:
1295 case ATA_CMD_WRITE_FUA_EXT:
1296 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1298 case ATA_CMD_FPDMA_READ:
1299 case ATA_CMD_FPDMA_WRITE:
1300 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1301 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1304 /* The only other commands EDMA supports in non-queued and
1305 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1306 * of which are defined/used by Linux. If we get here, this
1307 * driver needs work.
1309 * FIXME: modify libata to give qc_prep a return value and
1310 * return error here.
1312 BUG_ON(tf->command);
1315 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1316 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1317 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1318 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1319 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1320 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1321 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1322 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1323 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1325 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1331 * mv_qc_prep_iie - Host specific command preparation.
1332 * @qc: queued command to prepare
1334 * This routine simply redirects to the general purpose routine
1335 * if command is not DMA. Else, it handles prep of the CRQB
1336 * (command request block), does some sanity checking, and calls
1337 * the SG load routine.
1340 * Inherited from caller.
1342 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1344 struct ata_port *ap = qc->ap;
1345 struct mv_port_priv *pp = ap->private_data;
1346 struct mv_crqb_iie *crqb;
1347 struct ata_taskfile *tf;
1351 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1352 (qc->tf.protocol != ATA_PROT_NCQ))
1355 /* Fill in Gen IIE command request block
1357 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1358 flags |= CRQB_FLAG_READ;
1360 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1361 flags |= qc->tag << CRQB_TAG_SHIFT;
1362 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1364 /* get current queue index from software */
1365 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1367 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1368 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1369 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1370 crqb->flags = cpu_to_le32(flags);
1373 crqb->ata_cmd[0] = cpu_to_le32(
1374 (tf->command << 16) |
1377 crqb->ata_cmd[1] = cpu_to_le32(
1383 crqb->ata_cmd[2] = cpu_to_le32(
1384 (tf->hob_lbal << 0) |
1385 (tf->hob_lbam << 8) |
1386 (tf->hob_lbah << 16) |
1387 (tf->hob_feature << 24)
1389 crqb->ata_cmd[3] = cpu_to_le32(
1391 (tf->hob_nsect << 8)
1394 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1400 * mv_qc_issue - Initiate a command to the host
1401 * @qc: queued command to start
1403 * This routine simply redirects to the general purpose routine
1404 * if command is not DMA. Else, it sanity checks our local
1405 * caches of the request producer/consumer indices then enables
1406 * DMA and bumps the request producer index.
1409 * Inherited from caller.
1411 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1413 struct ata_port *ap = qc->ap;
1414 void __iomem *port_mmio = mv_ap_base(ap);
1415 struct mv_port_priv *pp = ap->private_data;
1418 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1419 (qc->tf.protocol != ATA_PROT_NCQ)) {
1420 /* We're about to send a non-EDMA capable command to the
1421 * port. Turn off EDMA so there won't be problems accessing
1422 * the shadow block and other registers.
1425 return ata_qc_issue_prot(qc);
1428 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1432 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1434 /* and write the request in pointer to kick the EDMA to life */
1435 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1436 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1442 * mv_err_intr - Handle error interrupts on the port
1443 * @ap: ATA channel to manipulate
1444 * @qc: affected command (may be NULL)
1446 * In most cases, just clear the interrupt and move on. However,
1447 * some cases require an eDMA reset, which is done right before
1448 * the COMRESET in mv_phy_reset(). The SERR case requires a
1449 * clear of pending errors in the SATA SERROR register. Finally,
1450 * if the port disabled DMA, update our cached copy to match.
1453 * Inherited from caller.
1455 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1457 void __iomem *port_mmio = mv_ap_base(ap);
1458 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1459 struct mv_port_priv *pp = ap->private_data;
1460 struct mv_host_priv *hpriv = ap->host->private_data;
1461 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1462 unsigned int action = 0, err_mask = 0;
1463 struct ata_eh_info *ehi = &ap->link.eh_info;
1465 ata_ehi_clear_desc(ehi);
1467 if (!edma_enabled) {
1468 /* just a guess: do we need to do this? should we
1469 * expand this, and do it in all cases?
1471 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1472 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1475 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1477 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1480 * all generations share these EDMA error cause bits
1483 if (edma_err_cause & EDMA_ERR_DEV)
1484 err_mask |= AC_ERR_DEV;
1485 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1486 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1487 EDMA_ERR_INTRL_PAR)) {
1488 err_mask |= AC_ERR_ATA_BUS;
1489 action |= ATA_EH_HARDRESET;
1490 ata_ehi_push_desc(ehi, "parity error");
1492 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1493 ata_ehi_hotplugged(ehi);
1494 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1495 "dev disconnect" : "dev connect");
1496 action |= ATA_EH_HARDRESET;
1499 if (IS_GEN_I(hpriv)) {
1500 eh_freeze_mask = EDMA_EH_FREEZE_5;
1502 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1503 struct mv_port_priv *pp = ap->private_data;
1504 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1505 ata_ehi_push_desc(ehi, "EDMA self-disable");
1508 eh_freeze_mask = EDMA_EH_FREEZE;
1510 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1511 struct mv_port_priv *pp = ap->private_data;
1512 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1513 ata_ehi_push_desc(ehi, "EDMA self-disable");
1516 if (edma_err_cause & EDMA_ERR_SERR) {
1517 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1518 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1519 err_mask = AC_ERR_ATA_BUS;
1520 action |= ATA_EH_HARDRESET;
1524 /* Clear EDMA now that SERR cleanup done */
1525 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1528 err_mask = AC_ERR_OTHER;
1529 action |= ATA_EH_HARDRESET;
1532 ehi->serror |= serr;
1533 ehi->action |= action;
1536 qc->err_mask |= err_mask;
1538 ehi->err_mask |= err_mask;
1540 if (edma_err_cause & eh_freeze_mask)
1541 ata_port_freeze(ap);
1546 static void mv_intr_pio(struct ata_port *ap)
1548 struct ata_queued_cmd *qc;
1551 /* ignore spurious intr if drive still BUSY */
1552 ata_status = readb(ap->ioaddr.status_addr);
1553 if (unlikely(ata_status & ATA_BUSY))
1556 /* get active ATA command */
1557 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1558 if (unlikely(!qc)) /* no active tag */
1560 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1563 /* and finally, complete the ATA command */
1564 qc->err_mask |= ac_err_mask(ata_status);
1565 ata_qc_complete(qc);
1568 static void mv_intr_edma(struct ata_port *ap)
1570 void __iomem *port_mmio = mv_ap_base(ap);
1571 struct mv_host_priv *hpriv = ap->host->private_data;
1572 struct mv_port_priv *pp = ap->private_data;
1573 struct ata_queued_cmd *qc;
1574 u32 out_index, in_index;
1575 bool work_done = false;
1577 /* get h/w response queue pointer */
1578 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1579 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1585 /* get s/w response queue last-read pointer, and compare */
1586 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1587 if (in_index == out_index)
1590 /* 50xx: get active ATA command */
1591 if (IS_GEN_I(hpriv))
1592 tag = ap->link.active_tag;
1594 /* Gen II/IIE: get active ATA command via tag, to enable
1595 * support for queueing. This works transparently for
1596 * queued and non-queued modes.
1599 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1601 qc = ata_qc_from_tag(ap, tag);
1603 /* For non-NCQ mode, the lower 8 bits of status
1604 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1605 * which should be zero if all went well.
1607 status = le16_to_cpu(pp->crpb[out_index].flags);
1608 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1609 mv_err_intr(ap, qc);
1613 /* and finally, complete the ATA command */
1616 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1617 ata_qc_complete(qc);
1620 /* advance software response queue pointer, to
1621 * indicate (after the loop completes) to hardware
1622 * that we have consumed a response queue entry.
1629 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1630 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1631 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1635 * mv_host_intr - Handle all interrupts on the given host controller
1636 * @host: host specific structure
1637 * @relevant: port error bits relevant to this host controller
1638 * @hc: which host controller we're to look at
1640 * Read then write-clear the HC interrupt status, then walk each
1641 * port connected to the HC and see if it needs servicing. Port
1642 * success ints are reported in the HC interrupt status reg, while the
1643 * port error ints are reported in the higher level main
1644 * interrupt status register and thus are passed in via the
1645 * 'relevant' argument.
1648 * Inherited from caller.
1650 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1652 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1653 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1660 port0 = MV_PORTS_PER_HC;
1662 /* we'll need the HC success int register in most cases */
1663 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1667 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1669 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1670 hc, relevant, hc_irq_cause);
1672 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1673 struct ata_port *ap = host->ports[port];
1674 struct mv_port_priv *pp = ap->private_data;
1675 int have_err_bits, hard_port, shift;
1677 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1680 shift = port << 1; /* (port * 2) */
1681 if (port >= MV_PORTS_PER_HC) {
1682 shift++; /* skip bit 8 in the HC Main IRQ reg */
1684 have_err_bits = ((PORT0_ERR << shift) & relevant);
1686 if (unlikely(have_err_bits)) {
1687 struct ata_queued_cmd *qc;
1689 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1690 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1693 mv_err_intr(ap, qc);
1697 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1699 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1700 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1703 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1710 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1712 struct mv_host_priv *hpriv = host->private_data;
1713 struct ata_port *ap;
1714 struct ata_queued_cmd *qc;
1715 struct ata_eh_info *ehi;
1716 unsigned int i, err_mask, printed = 0;
1719 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1721 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1724 DPRINTK("All regs @ PCI error\n");
1725 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1727 writelfl(0, mmio + hpriv->irq_cause_ofs);
1729 for (i = 0; i < host->n_ports; i++) {
1730 ap = host->ports[i];
1731 if (!ata_link_offline(&ap->link)) {
1732 ehi = &ap->link.eh_info;
1733 ata_ehi_clear_desc(ehi);
1735 ata_ehi_push_desc(ehi,
1736 "PCI err cause 0x%08x", err_cause);
1737 err_mask = AC_ERR_HOST_BUS;
1738 ehi->action = ATA_EH_HARDRESET;
1739 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1741 qc->err_mask |= err_mask;
1743 ehi->err_mask |= err_mask;
1745 ata_port_freeze(ap);
1751 * mv_interrupt - Main interrupt event handler
1753 * @dev_instance: private data; in this case the host structure
1755 * Read the read-only main cause register to determine if any host
1756 * controllers have pending interrupts. If so, call the lower level
1757 * routine to handle them. Also check for PCI errors, which are reported only here.
1761 * This routine holds the host lock while processing pending
1764 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1766 struct ata_host *host = dev_instance;
1767 unsigned int hc, handled = 0, n_hcs;
1768 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1769 u32 irq_stat, irq_mask;
1771 spin_lock(&host->lock);
1772 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1773 irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
1775 /* check the cases where we either have nothing pending or have read
1776 * a bogus register value which can indicate HW removal or PCI fault
1778 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1781 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1783 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1784 mv_pci_error(host, mmio);
1786 goto out_unlock; /* skip all other HC irq handling */
1789 for (hc = 0; hc < n_hcs; hc++) {
1790 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1792 mv_host_intr(host, relevant, hc);
1798 spin_unlock(&host->lock);
1800 return IRQ_RETVAL(handled);
1803 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1805 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1806 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1808 return hc_mmio + ofs;
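/* For example (added for illustration): port 6 is hard port 2 of HC 1,
 * so its 5xxx PHY registers start at hc_mmio + (2 + 1) * 0x100, i.e.
 * hc_mmio + 0x300.
 */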
1811 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1815 switch (sc_reg_in) {
1819 ofs = sc_reg_in * sizeof(u32);
1828 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1830 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1831 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1832 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1834 if (ofs != 0xffffffffU) {
1835 *val = readl(addr + ofs);
1841 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1843 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1844 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1845 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1847 if (ofs != 0xffffffffU) {
1848 writelfl(val, addr + ofs);
1854 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1856 struct pci_dev *pdev = to_pci_dev(host->dev);
1859 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1862 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1864 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1867 mv_reset_pci_bus(host, mmio);
1870 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1872 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1875 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1878 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1881 tmp = readl(phy_mmio + MV5_PHY_MODE);
1883 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1884 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1887 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1891 writel(0, mmio + MV_GPIO_PORT_CTL);
1893 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1895 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1897 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1900 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1903 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1904 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1906 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1909 tmp = readl(phy_mmio + MV5_LT_MODE);
1911 writel(tmp, phy_mmio + MV5_LT_MODE);
1913 tmp = readl(phy_mmio + MV5_PHY_CTL);
1916 writel(tmp, phy_mmio + MV5_PHY_CTL);
1919 tmp = readl(phy_mmio + MV5_PHY_MODE);
1921 tmp |= hpriv->signal[port].pre;
1922 tmp |= hpriv->signal[port].amps;
1923 writel(tmp, phy_mmio + MV5_PHY_MODE);
1928 #define ZERO(reg) writel(0, port_mmio + (reg))
1929 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1932 void __iomem *port_mmio = mv_port_base(mmio, port);
1934 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1936 mv_channel_reset(hpriv, mmio, port);
1938 ZERO(0x028); /* command */
1939 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1940 ZERO(0x004); /* timer */
1941 ZERO(0x008); /* irq err cause */
1942 ZERO(0x00c); /* irq err mask */
1943 ZERO(0x010); /* rq bah */
1944 ZERO(0x014); /* rq inp */
1945 ZERO(0x018); /* rq outp */
1946 ZERO(0x01c); /* respq bah */
1947 ZERO(0x024); /* respq outp */
1948 ZERO(0x020); /* respq inp */
1949 ZERO(0x02c); /* test control */
1950 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1954 #define ZERO(reg) writel(0, hc_mmio + (reg))
1955 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1958 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1966 tmp = readl(hc_mmio + 0x20);
1969 writel(tmp, hc_mmio + 0x20);
1973 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1976 unsigned int hc, port;
1978 for (hc = 0; hc < n_hc; hc++) {
1979 for (port = 0; port < MV_PORTS_PER_HC; port++)
1980 mv5_reset_hc_port(hpriv, mmio,
1981 (hc * MV_PORTS_PER_HC) + port);
1983 mv5_reset_one_hc(hpriv, mmio, hc);
1990 #define ZERO(reg) writel(0, mmio + (reg))
1991 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
1993 struct mv_host_priv *hpriv = host->private_data;
1996 tmp = readl(mmio + MV_PCI_MODE);
1998 writel(tmp, mmio + MV_PCI_MODE);
2000 ZERO(MV_PCI_DISC_TIMER);
2001 ZERO(MV_PCI_MSI_TRIGGER);
2002 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2003 ZERO(HC_MAIN_IRQ_MASK_OFS);
2004 ZERO(MV_PCI_SERR_MASK);
2005 ZERO(hpriv->irq_cause_ofs);
2006 ZERO(hpriv->irq_mask_ofs);
2007 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2008 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2009 ZERO(MV_PCI_ERR_ATTRIBUTE);
2010 ZERO(MV_PCI_ERR_COMMAND);
2014 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2018 mv5_reset_flash(hpriv, mmio);
2020 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2022 tmp |= (1 << 5) | (1 << 6);
2023 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2027 * mv6_reset_hc - Perform the 6xxx global soft reset
2028 * @mmio: base address of the HBA
2030 * This routine only applies to 6xxx parts.
2033 * Inherited from caller.
2035 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2038 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2042 /* Following procedure defined in PCI "main command and status
2046 writel(t | STOP_PCI_MASTER, reg);
2048 for (i = 0; i < 1000; i++) {
2051 if (PCI_MASTER_EMPTY & t)
2054 if (!(PCI_MASTER_EMPTY & t)) {
2055 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2063 writel(t | GLOB_SFT_RST, reg);
2066 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2068 if (!(GLOB_SFT_RST & t)) {
2069 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2074 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2077 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2080 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2082 if (GLOB_SFT_RST & t) {
2083 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2090 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2093 void __iomem *port_mmio;
2096 tmp = readl(mmio + MV_RESET_CFG);
2097 if ((tmp & (1 << 0)) == 0) {
2098 hpriv->signal[idx].amps = 0x7 << 8;
2099 hpriv->signal[idx].pre = 0x1 << 5;
2103 port_mmio = mv_port_base(mmio, idx);
2104 tmp = readl(port_mmio + PHY_MODE2);
2106 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2107 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2110 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2112 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2115 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2118 void __iomem *port_mmio = mv_port_base(mmio, port);
2120 u32 hp_flags = hpriv->hp_flags;
2122 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2124 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2127 if (fix_phy_mode2) {
2128 m2 = readl(port_mmio + PHY_MODE2);
2131 writel(m2, port_mmio + PHY_MODE2);
2135 m2 = readl(port_mmio + PHY_MODE2);
2136 m2 &= ~((1 << 16) | (1 << 31));
2137 writel(m2, port_mmio + PHY_MODE2);
2142 /* who knows what this magic does */
2143 tmp = readl(port_mmio + PHY_MODE3);
2146 writel(tmp, port_mmio + PHY_MODE3);
2148 if (fix_phy_mode4) {
2151 m4 = readl(port_mmio + PHY_MODE4);
2153 if (hp_flags & MV_HP_ERRATA_60X1B2)
2154 tmp = readl(port_mmio + 0x310);
2156 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2158 writel(m4, port_mmio + PHY_MODE4);
2160 if (hp_flags & MV_HP_ERRATA_60X1B2)
2161 writel(tmp, port_mmio + 0x310);
2164 /* Revert values of pre-emphasis and signal amps to the saved ones */
2165 m2 = readl(port_mmio + PHY_MODE2);
2167 m2 &= ~MV_M2_PREAMP_MASK;
2168 m2 |= hpriv->signal[port].amps;
2169 m2 |= hpriv->signal[port].pre;
2172 /* according to mvSata 3.6.1, some IIE values are fixed */
2173 if (IS_GEN_IIE(hpriv)) {
2178 writel(m2, port_mmio + PHY_MODE2);
2181 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2182 unsigned int port_no)
2184 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2186 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2188 if (IS_GEN_II(hpriv)) {
2189 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2190 ifctl |= (1 << 7); /* enable gen2i speed */
2191 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2192 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2195 udelay(25); /* allow reset propagation */
2197 /* Spec never mentions clearing the bit. Marvell's driver does
2198 * clear the bit, however.
2200 writelfl(0, port_mmio + EDMA_CMD_OFS);
2202 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2204 if (IS_GEN_I(hpriv))
2209 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2210 * @ap: ATA channel to manipulate
2212 * Part of this is taken from __sata_phy_reset and modified to
2213 * not sleep since this routine gets called from interrupt level.
2216 * Inherited from caller. This is coded to be safe to call at
2217 * interrupt level, i.e. it does not sleep.
2219 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2220 unsigned long deadline)
2222 struct mv_port_priv *pp = ap->private_data;
2223 struct mv_host_priv *hpriv = ap->host->private_data;
2224 void __iomem *port_mmio = mv_ap_base(ap);
2228 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2232 u32 sstatus, serror, scontrol;
2234 mv_scr_read(ap, SCR_STATUS, &sstatus);
2235 mv_scr_read(ap, SCR_ERROR, &serror);
2236 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2237 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2238 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2242 /* Issue COMRESET via SControl */
2244 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2247 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2251 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2252 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2256 } while (time_before(jiffies, deadline));
2258 /* work around errata */
2259 if (IS_GEN_II(hpriv) &&
2260 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2262 goto comreset_retry;
2266 u32 sstatus, serror, scontrol;
2268 mv_scr_read(ap, SCR_STATUS, &sstatus);
2269 mv_scr_read(ap, SCR_ERROR, &serror);
2270 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2271 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2272 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2276 if (ata_link_offline(&ap->link)) {
2277 *class = ATA_DEV_NONE;
2281 /* even after SStatus reflects that the device is ready,
2282 * it seems to take a while for the link to be fully
2283 * established (and thus Status no longer 0x80/0x7F),
2284 * so we poll for that here.
2288 u8 drv_stat = ata_check_status(ap);
2289 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2294 if (time_after(jiffies, deadline))
2298 /* FIXME: if we passed the deadline, the following
2299 * code probably produces an invalid result
2302 /* finally, read device signature from TF registers */
2303 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2305 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2307 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2312 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2314 struct ata_port *ap = link->ap;
2315 struct mv_port_priv *pp = ap->private_data;
2316 struct ata_eh_context *ehc = &link->eh_context;
2319 rc = mv_stop_dma(ap);
2321 ehc->i.action |= ATA_EH_HARDRESET;
2323 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2324 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2325 ehc->i.action |= ATA_EH_HARDRESET;
2328 /* if we're about to do hardreset, nothing more to do */
2329 if (ehc->i.action & ATA_EH_HARDRESET)
2332 if (ata_link_online(link))
2333 rc = ata_wait_ready(ap, deadline);
2340 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2341 unsigned long deadline)
2343 struct ata_port *ap = link->ap;
2344 struct mv_host_priv *hpriv = ap->host->private_data;
2345 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2349 mv_channel_reset(hpriv, mmio, ap->port_no);
2351 mv_phy_reset(ap, class, deadline);
2356 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2358 struct ata_port *ap = link->ap;
2361 /* print link status */
2362 sata_print_link_status(link);
2365 sata_scr_read(link, SCR_ERROR, &serr);
2366 sata_scr_write_flush(link, SCR_ERROR, serr);
2368 /* bail out if no device is present */
2369 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2370 DPRINTK("EXIT, no device\n");
2374 /* set up device control */
2375 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2378 static void mv_error_handler(struct ata_port *ap)
2380 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2381 mv_hardreset, mv_postreset);
2384 static void mv_eh_freeze(struct ata_port *ap)
2386 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2387 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2391 /* FIXME: handle coalescing completion events properly */
2393 shift = ap->port_no * 2;
2397 mask = 0x3 << shift;
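/*
 * Illustrative note (not in the original driver): the main IRQ mask
 * register carries a pair of bits per port (error and done), which is
 * why a 0x3 mask is shifted into this port's position before being
 * cleared below.
 */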
2399 /* disable assertion of portN err, done events */
2400 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2401 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2404 static void mv_eh_thaw(struct ata_port *ap)
2406 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2407 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2408 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2409 void __iomem *port_mmio = mv_ap_base(ap);
2410 u32 tmp, mask, hc_irq_cause;
2411 unsigned int shift, hc_port_no = ap->port_no;
2413 /* FIXME: handle coalescing completion events properly */
2415 shift = ap->port_no * 2;
2421 mask = 0x3 << shift;
2423 /* clear EDMA errors on this port */
2424 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2426 /* clear pending irq events */
2427 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2428 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2429 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2430 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2432 /* enable assertion of portN err, done events */
2433 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2434 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2438 * mv_port_init - Perform some early initialization on a single port.
2439 * @port: libata data structure storing shadow register addresses
2440 * @port_mmio: base address of the port
2442 * Initialize shadow register mmio addresses, clear outstanding
2443 * interrupts on the port, and unmask interrupts for the future
2444 * start of the port.
2447 * Inherited from caller.
2449 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2451 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2454 /* PIO related setup
2456 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2458 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2459 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2460 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2461 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2462 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2463 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2465 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2466 /* special case: control/altstatus doesn't have ATA_REG_ address */
2467 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
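/*
 * Illustrative note (not in the original driver): the shadow taskfile
 * registers sit at 32-bit strides from SHD_BLK_OFS, so with the usual
 * libata ATA_REG_* indices data lands at +0x00, error/feature at
 * +0x04, and so on up to status/command at +0x1c, while the
 * control/altstatus register lives at the separate SHD_CTL_AST_OFS.
 */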
2470 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2472 /* Clear any currently outstanding port interrupt conditions */
2473 serr_ofs = mv_scr_offset(SCR_ERROR);
2474 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2475 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2477 /* unmask all non-transient EDMA error interrupts */
2478 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2480 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2481 readl(port_mmio + EDMA_CFG_OFS),
2482 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2483 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2486 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2488 struct pci_dev *pdev = to_pci_dev(host->dev);
2489 struct mv_host_priv *hpriv = host->private_data;
2490 u32 hp_flags = hpriv->hp_flags;
2492 switch (board_idx) {
2494 hpriv->ops = &mv5xxx_ops;
2495 hp_flags |= MV_HP_GEN_I;
2497 switch (pdev->revision) {
2499 hp_flags |= MV_HP_ERRATA_50XXB0;
2502 hp_flags |= MV_HP_ERRATA_50XXB2;
2505 dev_printk(KERN_WARNING, &pdev->dev,
2506 "Applying 50XXB2 workarounds to unknown rev\n");
2507 hp_flags |= MV_HP_ERRATA_50XXB2;
2514 hpriv->ops = &mv5xxx_ops;
2515 hp_flags |= MV_HP_GEN_I;
2517 switch (pdev->revision) {
2519 hp_flags |= MV_HP_ERRATA_50XXB0;
2522 hp_flags |= MV_HP_ERRATA_50XXB2;
2525 dev_printk(KERN_WARNING, &pdev->dev,
2526 "Applying B2 workarounds to unknown rev\n");
2527 hp_flags |= MV_HP_ERRATA_50XXB2;
2534 hpriv->ops = &mv6xxx_ops;
2535 hp_flags |= MV_HP_GEN_II;
2537 switch (pdev->revision) {
2539 hp_flags |= MV_HP_ERRATA_60X1B2;
2542 hp_flags |= MV_HP_ERRATA_60X1C0;
2545 dev_printk(KERN_WARNING, &pdev->dev,
2546 "Applying B2 workarounds to unknown rev\n");
2547 hp_flags |= MV_HP_ERRATA_60X1B2;
2553 hp_flags |= MV_HP_PCIE;
2554 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2555 (pdev->device == 0x2300 || pdev->device == 0x2310))
2558 * Highpoint RocketRAID PCIe 23xx series cards:
2560 * Unconfigured drives are treated as "Legacy"
2561 * by the BIOS, and it overwrites sector 8 with
2562 * a "Lgcy" metadata block prior to Linux boot.
2564 * Configured drives (RAID or JBOD) leave sector 8
2565 * alone, but instead overwrite a high-numbered
2566 * sector for the RAID metadata. This sector can
2567 * be determined exactly by truncating the physical
2568 * drive capacity to a nice even GB value.
2570 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2572 * Warn the user, lest they think we're just buggy.
2574 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2575 " BIOS CORRUPTS DATA on all attached drives,"
2576 " regardless of if/how they are configured."
2578 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2579 " use sectors 8-9 on \"Legacy\" drives,"
2580 " and avoid the final two gigabytes on"
2581 " all RocketRAID BIOS initialized drives.\n");
2584 hpriv->ops = &mv6xxx_ops;
2585 hp_flags |= MV_HP_GEN_IIE;
2587 switch (pdev->revision) {
2589 hp_flags |= MV_HP_ERRATA_XX42A0;
2592 hp_flags |= MV_HP_ERRATA_60X1C0;
2595 dev_printk(KERN_WARNING, &pdev->dev,
2596 "Applying 60X1C0 workarounds to unknown rev\n");
2597 hp_flags |= MV_HP_ERRATA_60X1C0;
2603 dev_printk(KERN_ERR, &pdev->dev,
2604 "BUG: invalid board index %u\n", board_idx);
2608 hpriv->hp_flags = hp_flags;
2609 if (hp_flags & MV_HP_PCIE) {
2610 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2611 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2612 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2614 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2615 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2616 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
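/*
 * Illustrative note (not in the original driver): chips behind a PCIe
 * upstream interface (MV_HP_PCIE, e.g. the 7042) expose their host
 * interrupt cause/mask registers at different offsets than the
 * PCI/PCI-X parts, so the offsets are cached in hpriv and used by the
 * rest of the driver.
 */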
2623 * mv_init_host - Perform some early initialization of the host.
2624 * @host: ATA host to initialize
2625 * @board_idx: controller index
2627 * If possible, do an early global reset of the host. Then do
2628 * our port init and clear/unmask all/relevant host interrupts.
2631 * Inherited from caller.
2633 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2635 int rc = 0, n_hc, port, hc;
2636 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2637 struct mv_host_priv *hpriv = host->private_data;
2639 /* global interrupt mask */
2640 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2642 rc = mv_chip_id(host, board_idx);
2646 n_hc = mv_get_hc_count(host->ports[0]->flags);
2648 for (port = 0; port < host->n_ports; port++)
2649 hpriv->ops->read_preamp(hpriv, port, mmio);
2651 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2655 hpriv->ops->reset_flash(hpriv, mmio);
2656 hpriv->ops->reset_bus(host, mmio);
2657 hpriv->ops->enable_leds(hpriv, mmio);
2659 for (port = 0; port < host->n_ports; port++) {
2660 if (IS_GEN_II(hpriv)) {
2661 void __iomem *port_mmio = mv_port_base(mmio, port);
2663 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2664 ifctl |= (1 << 7); /* enable gen2i speed */
2665 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2666 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2669 hpriv->ops->phy_errata(hpriv, mmio, port);
2672 for (port = 0; port < host->n_ports; port++) {
2673 struct ata_port *ap = host->ports[port];
2674 void __iomem *port_mmio = mv_port_base(mmio, port);
2675 unsigned int offset = port_mmio - mmio;
2677 mv_port_init(&ap->ioaddr, port_mmio);
2680 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2681 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2685 for (hc = 0; hc < n_hc; hc++) {
2686 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2688 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2689 "(before clear)=0x%08x\n", hc,
2690 readl(hc_mmio + HC_CFG_OFS),
2691 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2693 /* Clear any currently outstanding hc interrupt conditions */
2694 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2697 /* Clear any currently outstanding host interrupt conditions */
2698 writelfl(0, mmio + hpriv->irq_cause_ofs);
2700 /* and unmask interrupt generation for host regs */
2701 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2703 if (IS_GEN_I(hpriv))
2704 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2706 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2708 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2709 "PCI int cause/mask=0x%08x/0x%08x\n",
2710 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2711 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2712 readl(mmio + hpriv->irq_cause_ofs),
2713 readl(mmio + hpriv->irq_mask_ofs));
2720 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
2722 static struct pci_driver mv_pci_driver = {
2724 .id_table = mv_pci_tbl,
2725 .probe = mv_init_one,
2726 .remove = ata_pci_remove_one,
2732 static int msi; /* Use PCI MSI; either zero (off, default) or non-zero */
2735 /* move to PCI layer or libata core? */
2736 static int pci_go_64(struct pci_dev *pdev)
2740 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2741 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2743 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2745 dev_printk(KERN_ERR, &pdev->dev,
2746 "64-bit DMA enable failed\n");
2751 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2753 dev_printk(KERN_ERR, &pdev->dev,
2754 "32-bit DMA enable failed\n");
2757 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2759 dev_printk(KERN_ERR, &pdev->dev,
2760 "32-bit consistent DMA enable failed\n");
2769 * mv_print_info - Dump key info to kernel log for perusal.
2770 * @host: ATA host to print info about
2772 * FIXME: complete this.
2775 * Inherited from caller.
2777 static void mv_print_info(struct ata_host *host)
2779 struct pci_dev *pdev = to_pci_dev(host->dev);
2780 struct mv_host_priv *hpriv = host->private_data;
2782 const char *scc_s, *gen;
2784 /* Use this to determine the HW stepping of the chip so we know
2785 * which errata to work around
2787 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2790 else if (scc == 0x01)
2795 if (IS_GEN_I(hpriv))
2797 else if (IS_GEN_II(hpriv))
2799 else if (IS_GEN_IIE(hpriv))
2804 dev_printk(KERN_INFO, &pdev->dev,
2805 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2806 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2807 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
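/*
 * Illustrative example (device name and values invented for this
 * note): the dev_printk above produces a line along the lines of
 *   sata_mv 0000:02:00.0: Gen-II 32 slots 4 ports SCSI mode IRQ via INTx
 */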
2810 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2812 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2814 if (!hpriv->crqb_pool)
2817 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2819 if (!hpriv->crpb_pool)
2822 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2824 if (!hpriv->sg_tbl_pool)
2831 * mv_init_one - handle a positive probe of a Marvell host
2832 * @pdev: PCI device found
2833 * @ent: PCI device ID entry for the matched host
2836 * Inherited from caller.
2838 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2840 static int printed_version;
2841 unsigned int board_idx = (unsigned int)ent->driver_data;
2842 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2843 struct ata_host *host;
2844 struct mv_host_priv *hpriv;
2847 if (!printed_version++)
2848 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2851 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2853 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2854 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2855 if (!host || !hpriv)
2857 host->private_data = hpriv;
2859 /* acquire resources */
2860 rc = pcim_enable_device(pdev);
2864 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2866 pcim_pin_device(pdev);
2869 host->iomap = pcim_iomap_table(pdev);
2871 rc = pci_go_64(pdev);
2875 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2879 /* initialize adapter */
2880 rc = mv_init_host(host, board_idx);
2884 /* Enable interrupts */
2885 if (msi && pci_enable_msi(pdev))
2888 mv_dump_pci_cfg(pdev, 0x68);
2889 mv_print_info(host);
2891 pci_set_master(pdev);
2892 pci_try_set_mwi(pdev);
2893 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2894 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
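/*
 * Illustrative note (not in the original driver): the probe path above
 * relies on managed resources (devm_kzalloc, pcim_enable_device,
 * pcim_iomap_regions, dmam_pool_create), so its error returns need no
 * explicit cleanup; the devres core releases everything acquired so
 * far.
 */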
2898 static int __init mv_init(void)
2902 rc = pci_register_driver(&mv_pci_driver);
2907 static void __exit mv_exit(void)
2910 pci_unregister_driver(&mv_pci_driver);
2914 MODULE_AUTHOR("Brett Russ");
2915 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2916 MODULE_LICENSE("GPL");
2917 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2918 MODULE_VERSION(DRV_VERSION);
2921 module_param(msi, int, 0444);
2922 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
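/*
 * Illustrative usage (not in the original driver): the msi parameter
 * is read-only at runtime (0444) and so is set at load time, e.g.:
 *   modprobe sata_mv msi=1
 */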
2925 module_init(mv_init);
2926 module_exit(mv_exit);