2 * sata_mv.c - Marvell SATA support
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
32 4) Add NCQ support (easy to intermediate, once new-EH support appears)
34 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
36 6) Add port multiplier support (intermediate)
38 8) Develop a low-power-consumption strategy, and implement it.
40 9) [Experiment, low priority] See if ATAPI can be supported using
41 "unknown FIS" or "vendor-specific FIS" support, or something creative
44 10) [Experiment, low priority] Investigate interrupt coalescing.
45 Quite often, especially with PCI Message Signalled Interrupts (MSI),
46 the overhead reduced by interrupt mitigation is not
47 worth the latency cost.
49 11) [Experiment, Marvell value added] Is it possible to use target
50 mode to cross-connect two Linux boxes with Marvell cards? If so,
51 creating LibATA target mode support would be very interesting.
53 Target mode, for those without docs, is the ability to directly
54 connect two SATA controllers.
56 13) Verify that 7042 is fully supported. I only have a 6042.
61 #include <linux/kernel.h>
62 #include <linux/module.h>
63 #include <linux/pci.h>
64 #include <linux/init.h>
65 #include <linux/blkdev.h>
66 #include <linux/delay.h>
67 #include <linux/interrupt.h>
68 #include <linux/dma-mapping.h>
69 #include <linux/device.h>
70 #include <scsi/scsi_host.h>
71 #include <scsi/scsi_cmnd.h>
72 #include <scsi/scsi_device.h>
73 #include <linux/libata.h>
75 #define DRV_NAME "sata_mv"
76 #define DRV_VERSION "1.01"
79 /* BAR's are enumerated in terms of pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
89 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
95 MV_SATAHC0_REG_BASE = 0x20000,
96 MV_FLASH_CTL = 0x1046c,
97 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
110 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
112 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
113 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
115 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
118 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
119 MV_PORT_HC_SHIFT = 2,
120 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
124 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
125 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
126 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
127 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
128 ATA_FLAG_PIO_POLLING,
129 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
131 CRQB_FLAG_READ = (1 << 0),
133 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
134 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
135 CRQB_CMD_ADDR_SHIFT = 8,
136 CRQB_CMD_CS = (0x2 << 11),
137 CRQB_CMD_LAST = (1 << 15),
139 CRPB_FLAG_STATUS_SHIFT = 8,
140 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
141 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
143 EPRD_FLAG_END_OF_TBL = (1 << 31),
145 /* PCI interface registers */
147 PCI_COMMAND_OFS = 0xc00,
149 PCI_MAIN_CMD_STS_OFS = 0xd30,
150 STOP_PCI_MASTER = (1 << 2),
151 PCI_MASTER_EMPTY = (1 << 3),
152 GLOB_SFT_RST = (1 << 4),
155 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
156 MV_PCI_DISC_TIMER = 0xd04,
157 MV_PCI_MSI_TRIGGER = 0xc38,
158 MV_PCI_SERR_MASK = 0xc28,
159 MV_PCI_XBAR_TMOUT = 0x1d04,
160 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
161 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
162 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
163 MV_PCI_ERR_COMMAND = 0x1d50,
165 PCI_IRQ_CAUSE_OFS = 0x1d58,
166 PCI_IRQ_MASK_OFS = 0x1d5c,
167 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
169 PCIE_IRQ_CAUSE_OFS = 0x1900,
170 PCIE_IRQ_MASK_OFS = 0x1910,
171 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
173 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
174 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
175 PORT0_ERR = (1 << 0), /* shift by port # */
176 PORT0_DONE = (1 << 1), /* shift by port # */
177 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
178 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
180 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
181 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
182 PORTS_0_3_COAL_DONE = (1 << 8),
183 PORTS_4_7_COAL_DONE = (1 << 17),
184 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
185 GPIO_INT = (1 << 22),
186 SELF_INT = (1 << 23),
187 TWSI_INT = (1 << 24),
188 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
189 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
190 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
191 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
193 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
196 /* SATAHC registers */
199 HC_IRQ_CAUSE_OFS = 0x14,
200 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
201 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
202 DEV_IRQ = (1 << 8), /* shift by port # */
204 /* Shadow block registers */
206 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
209 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
210 SATA_ACTIVE_OFS = 0x350,
211 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
218 SATA_INTERFACE_CTL = 0x050,
220 MV_M2_PREAMP_MASK = 0x7e0,
224 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
225 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
226 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
227 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
228 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
230 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
231 EDMA_ERR_IRQ_MASK_OFS = 0xc,
232 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
233 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
234 EDMA_ERR_DEV = (1 << 2), /* device error */
235 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
236 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
237 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
238 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
239 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
240 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
241 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
242 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
243 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
244 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
245 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
247 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
248 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
249 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
250 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
251 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
253 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
255 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
256 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
257 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
258 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
259 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
260 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
262 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
264 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
265 EDMA_ERR_OVERRUN_5 = (1 << 5),
266 EDMA_ERR_UNDERRUN_5 = (1 << 6),
268 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
269 EDMA_ERR_LNK_CTRL_RX_1 |
270 EDMA_ERR_LNK_CTRL_RX_3 |
271 EDMA_ERR_LNK_CTRL_TX,
273 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
283 EDMA_ERR_LNK_CTRL_RX_2 |
284 EDMA_ERR_LNK_DATA_RX |
285 EDMA_ERR_LNK_DATA_TX |
286 EDMA_ERR_TRANS_PROTO,
287 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
292 EDMA_ERR_UNDERRUN_5 |
293 EDMA_ERR_SELF_DIS_5 |
299 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
300 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
302 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
303 EDMA_REQ_Q_PTR_SHIFT = 5,
305 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
306 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
307 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
308 EDMA_RSP_Q_PTR_SHIFT = 3,
310 EDMA_CMD_OFS = 0x28, /* EDMA command register */
311 EDMA_EN = (1 << 0), /* enable EDMA */
312 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
313 ATA_RST = (1 << 2), /* reset trans/link/phy */
315 EDMA_IORDY_TMOUT = 0x34,
318 /* Host private flags (hp_flags) */
319 MV_HP_FLAG_MSI = (1 << 0),
320 MV_HP_ERRATA_50XXB0 = (1 << 1),
321 MV_HP_ERRATA_50XXB2 = (1 << 2),
322 MV_HP_ERRATA_60X1B2 = (1 << 3),
323 MV_HP_ERRATA_60X1C0 = (1 << 4),
324 MV_HP_ERRATA_XX42A0 = (1 << 5),
325 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
326 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
327 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
328 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
330 /* Port private flags (pp_flags) */
331 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
332 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
333 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
336 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
337 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
338 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
341 /* DMA boundary 0xffff is required by the s/g splitting
342 * we need on /length/ in mv_fill-sg().
344 MV_DMA_BOUNDARY = 0xffffU,
346 /* mask of register bits containing lower 32 bits
347 * of EDMA request queue DMA address
349 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
351 /* ditto, for response queue */
352 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
365 /* Command ReQuest Block: 32B */
381 /* Command ResPonse Block: 8B */
388 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
396 struct mv_port_priv {
397 struct mv_crqb *crqb;
399 struct mv_crpb *crpb;
401 struct mv_sg *sg_tbl;
402 dma_addr_t sg_tbl_dma;
404 unsigned int req_idx;
405 unsigned int resp_idx;
410 struct mv_port_signal {
415 struct mv_host_priv {
417 struct mv_port_signal signal[8];
418 const struct mv_hw_ops *ops;
423 * These consistent DMA memory pools give us guaranteed
424 * alignment for hardware-accessed data structures,
425 * and less memory waste in accomplishing the alignment.
427 struct dma_pool *crqb_pool;
428 struct dma_pool *crpb_pool;
429 struct dma_pool *sg_tbl_pool;
433 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
435 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
436 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
438 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
440 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
441 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
444 static void mv_irq_clear(struct ata_port *ap);
445 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
446 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
447 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
448 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
449 static int mv_port_start(struct ata_port *ap);
450 static void mv_port_stop(struct ata_port *ap);
451 static void mv_qc_prep(struct ata_queued_cmd *qc);
452 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
453 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
454 static void mv_error_handler(struct ata_port *ap);
455 static void mv_post_int_cmd(struct ata_queued_cmd *qc);
456 static void mv_eh_freeze(struct ata_port *ap);
457 static void mv_eh_thaw(struct ata_port *ap);
458 static void mv6_dev_config(struct ata_device *dev);
459 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
461 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
463 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
464 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
466 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
468 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
469 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
471 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
473 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
474 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
476 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
478 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
479 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
480 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
481 unsigned int port_no);
482 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
483 void __iomem *port_mmio, int want_ncq);
484 static int __mv_stop_dma(struct ata_port *ap);
486 static struct scsi_host_template mv5_sht = {
487 .module = THIS_MODULE,
489 .ioctl = ata_scsi_ioctl,
490 .queuecommand = ata_scsi_queuecmd,
491 .can_queue = ATA_DEF_QUEUE,
492 .this_id = ATA_SHT_THIS_ID,
493 .sg_tablesize = MV_MAX_SG_CT / 2,
494 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
495 .emulated = ATA_SHT_EMULATED,
497 .proc_name = DRV_NAME,
498 .dma_boundary = MV_DMA_BOUNDARY,
499 .slave_configure = ata_scsi_slave_config,
500 .slave_destroy = ata_scsi_slave_destroy,
501 .bios_param = ata_std_bios_param,
504 static struct scsi_host_template mv6_sht = {
505 .module = THIS_MODULE,
507 .ioctl = ata_scsi_ioctl,
508 .queuecommand = ata_scsi_queuecmd,
509 .can_queue = ATA_DEF_QUEUE,
510 .this_id = ATA_SHT_THIS_ID,
511 .sg_tablesize = MV_MAX_SG_CT / 2,
512 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
513 .emulated = ATA_SHT_EMULATED,
515 .proc_name = DRV_NAME,
516 .dma_boundary = MV_DMA_BOUNDARY,
517 .slave_configure = ata_scsi_slave_config,
518 .slave_destroy = ata_scsi_slave_destroy,
519 .bios_param = ata_std_bios_param,
522 static const struct ata_port_operations mv5_ops = {
523 .tf_load = ata_tf_load,
524 .tf_read = ata_tf_read,
525 .check_status = ata_check_status,
526 .exec_command = ata_exec_command,
527 .dev_select = ata_std_dev_select,
529 .cable_detect = ata_cable_sata,
531 .qc_prep = mv_qc_prep,
532 .qc_issue = mv_qc_issue,
533 .data_xfer = ata_data_xfer,
535 .irq_clear = mv_irq_clear,
536 .irq_on = ata_irq_on,
538 .error_handler = mv_error_handler,
539 .post_internal_cmd = mv_post_int_cmd,
540 .freeze = mv_eh_freeze,
543 .scr_read = mv5_scr_read,
544 .scr_write = mv5_scr_write,
546 .port_start = mv_port_start,
547 .port_stop = mv_port_stop,
550 static const struct ata_port_operations mv6_ops = {
551 .dev_config = mv6_dev_config,
552 .tf_load = ata_tf_load,
553 .tf_read = ata_tf_read,
554 .check_status = ata_check_status,
555 .exec_command = ata_exec_command,
556 .dev_select = ata_std_dev_select,
558 .cable_detect = ata_cable_sata,
560 .qc_prep = mv_qc_prep,
561 .qc_issue = mv_qc_issue,
562 .data_xfer = ata_data_xfer,
564 .irq_clear = mv_irq_clear,
565 .irq_on = ata_irq_on,
567 .error_handler = mv_error_handler,
568 .post_internal_cmd = mv_post_int_cmd,
569 .freeze = mv_eh_freeze,
572 .scr_read = mv_scr_read,
573 .scr_write = mv_scr_write,
575 .port_start = mv_port_start,
576 .port_stop = mv_port_stop,
579 static const struct ata_port_operations mv_iie_ops = {
580 .tf_load = ata_tf_load,
581 .tf_read = ata_tf_read,
582 .check_status = ata_check_status,
583 .exec_command = ata_exec_command,
584 .dev_select = ata_std_dev_select,
586 .cable_detect = ata_cable_sata,
588 .qc_prep = mv_qc_prep_iie,
589 .qc_issue = mv_qc_issue,
590 .data_xfer = ata_data_xfer,
592 .irq_clear = mv_irq_clear,
593 .irq_on = ata_irq_on,
595 .error_handler = mv_error_handler,
596 .post_internal_cmd = mv_post_int_cmd,
597 .freeze = mv_eh_freeze,
600 .scr_read = mv_scr_read,
601 .scr_write = mv_scr_write,
603 .port_start = mv_port_start,
604 .port_stop = mv_port_stop,
607 static const struct ata_port_info mv_port_info[] = {
609 .flags = MV_COMMON_FLAGS,
610 .pio_mask = 0x1f, /* pio0-4 */
611 .udma_mask = ATA_UDMA6,
612 .port_ops = &mv5_ops,
615 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
616 .pio_mask = 0x1f, /* pio0-4 */
617 .udma_mask = ATA_UDMA6,
618 .port_ops = &mv5_ops,
621 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
622 .pio_mask = 0x1f, /* pio0-4 */
623 .udma_mask = ATA_UDMA6,
624 .port_ops = &mv5_ops,
627 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
628 .pio_mask = 0x1f, /* pio0-4 */
629 .udma_mask = ATA_UDMA6,
630 .port_ops = &mv6_ops,
633 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
635 .pio_mask = 0x1f, /* pio0-4 */
636 .udma_mask = ATA_UDMA6,
637 .port_ops = &mv6_ops,
640 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
641 .pio_mask = 0x1f, /* pio0-4 */
642 .udma_mask = ATA_UDMA6,
643 .port_ops = &mv_iie_ops,
646 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
647 .pio_mask = 0x1f, /* pio0-4 */
648 .udma_mask = ATA_UDMA6,
649 .port_ops = &mv_iie_ops,
653 static const struct pci_device_id mv_pci_tbl[] = {
654 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
655 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
656 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
657 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
658 /* RocketRAID 1740/174x have different identifiers */
659 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
660 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
662 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
663 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
664 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
665 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
666 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
668 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
671 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
673 /* Marvell 7042 support */
674 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
676 /* Highpoint RocketRAID PCIe series */
677 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
678 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
680 { } /* terminate list */
683 static struct pci_driver mv_pci_driver = {
685 .id_table = mv_pci_tbl,
686 .probe = mv_init_one,
687 .remove = ata_pci_remove_one,
690 static const struct mv_hw_ops mv5xxx_ops = {
691 .phy_errata = mv5_phy_errata,
692 .enable_leds = mv5_enable_leds,
693 .read_preamp = mv5_read_preamp,
694 .reset_hc = mv5_reset_hc,
695 .reset_flash = mv5_reset_flash,
696 .reset_bus = mv5_reset_bus,
699 static const struct mv_hw_ops mv6xxx_ops = {
700 .phy_errata = mv6_phy_errata,
701 .enable_leds = mv6_enable_leds,
702 .read_preamp = mv6_read_preamp,
703 .reset_hc = mv6_reset_hc,
704 .reset_flash = mv6_reset_flash,
705 .reset_bus = mv_reset_pci_bus,
711 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
714 /* move to PCI layer or libata core? */
/*
 * pci_go_64 - configure the widest usable DMA masks for @pdev.
 *
 * Tries the 64-bit streaming mask first; if that succeeds but the 64-bit
 * consistent (coherent) mask cannot be set, retries the consistent mask at
 * 32 bits.  If 64-bit streaming is unavailable, both the streaming and
 * consistent masks are set to 32 bits.  Each failure is reported through
 * dev_printk(KERN_ERR, ...).
 *
 * NOTE(review): several lines are elided in this excerpt (declaration of
 * rc, braces, return statements), so the exact error/return paths cannot
 * be confirmed here -- verify against the full source.
 */
715 static int pci_go_64(struct pci_dev *pdev)
719 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
720 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
/* 64-bit consistent mask failed: fall back to a 32-bit consistent mask */
722 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
724 dev_printk(KERN_ERR, &pdev->dev,
725 "64-bit DMA enable failed\n");
/* no 64-bit support at all: use 32-bit masks for both kinds of DMA */
730 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
732 dev_printk(KERN_ERR, &pdev->dev,
733 "32-bit DMA enable failed\n");
736 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
738 dev_printk(KERN_ERR, &pdev->dev,
739 "32-bit consistent DMA enable failed\n");
751 static inline void writelfl(unsigned long data, void __iomem *addr)
/*
 * "write long, flushed": after the writel() of @data to @addr (on an
 * elided line), read the register back so the posted PCI write actually
 * reaches the hardware before we proceed.
 */
754 (void) readl(addr); /* flush to avoid PCI posted write */
/* Return the MMIO base of SATA host controller (HC) number @hc. */
757 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
759 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
/* Map a global port number (0-7) to its host-controller index (port / 4,
 * since MV_PORT_HC_SHIFT == 2). */
762 static inline unsigned int mv_hc_from_port(unsigned int port)
764 return port >> MV_PORT_HC_SHIFT;
/* Map a global port number to its index within its HC -- presumably
 * port % 4; MV_PORT_MASK's definition is elided from this excerpt. */
767 static inline unsigned int mv_hardport_from_port(unsigned int port)
769 return port & MV_PORT_MASK;
/* MMIO base of the HC that owns global port @port (signature continues on
 * an elided line). */
772 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
775 return mv_hc_base(base, mv_hc_from_port(port));
/*
 * Per-port register base: the owning HC's base, plus the arbiter register
 * block, plus this hard-port's own register window.
 */
778 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
780 return mv_hc_base_from_port(base, port) +
781 MV_SATAHC_ARBTR_REG_SZ +
782 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
/* Port register base for an ata_port, via the primary PCI BAR mapping. */
785 static inline void __iomem *mv_ap_base(struct ata_port *ap)
787 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
/* Number of host controllers on the chip: 2 if dual-HC flagged, else 1. */
790 static inline int mv_get_hc_count(unsigned long port_flags)
792 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
795 static void mv_irq_clear(struct ata_port *ap)
/*
 * mv_set_edma_ptrs - program the EDMA request/response queue registers.
 *
 * Writes the high/low halves of the CRQB (request) and CRPB (response)
 * queue DMA addresses and the current in/out pointer indices for one port.
 * Chips with the XX42A0 erratum additionally need the full low 32 address
 * bits written into the pointer registers.
 *
 * NOTE(review): the else-branch partner lines of the erratum conditionals
 * are elided in this excerpt; the pairing of erratum vs. normal writes is
 * inferred from the visible lines -- confirm against the full source.
 */
799 static void mv_set_edma_ptrs(void __iomem *port_mmio,
800 struct mv_host_priv *hpriv,
801 struct mv_port_priv *pp)
806 * initialize request queue
/* CRQB queue must be 1KB-aligned (hence the 0x3ff check) */
808 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
810 WARN_ON(pp->crqb_dma & 0x3ff);
/* (x >> 16) >> 16 extracts the upper 32 bits without a shift-width
 * warning when dma_addr_t is only 32 bits wide */
811 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
812 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
813 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
815 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
816 writelfl((pp->crqb_dma & 0xffffffff) | index,
817 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
819 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
822 * initialize response queue
/* CRPB queue must be 256B-aligned (hence the 0xff check) */
824 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
826 WARN_ON(pp->crpb_dma & 0xff);
827 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
829 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
830 writelfl((pp->crpb_dma & 0xffffffff) | index,
831 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
833 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
835 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
836 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
840 * mv_start_dma - Enable eDMA engine
841 * @base: port base address
842 * @pp: port private data
844 * Verify the local cache of the eDMA state is accurate with a
848 * Inherited from caller.
850 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
851 struct mv_port_priv *pp, u8 protocol)
/* If EDMA is already running but in the wrong NCQ mode, it has to be
 * stopped first (the stop call itself is on an elided line). */
853 int want_ncq = (protocol == ATA_PROT_NCQ);
855 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
856 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
857 if (want_ncq != using_ncq)
860 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
861 struct mv_host_priv *hpriv = ap->host->private_data;
862 int hard_port = mv_hardport_from_port(ap->port_no);
/*
 * NOTE(review): hard_port (0-3) is passed where
 * mv_hc_base_from_port() takes a *global* port number; for ports
 * 4-7 on dual-HC chips this resolves to HC0's registers.  Later
 * upstream code passes ap->port_no here -- verify against the
 * full source before relying on this for 8-port parts.
 */
863 void __iomem *hc_mmio = mv_hc_base_from_port(
864 ap->host->iomap[MV_PRIMARY_BAR], hard_port);
865 u32 hc_irq_cause, ipending;
867 /* clear EDMA event indicators, if any */
868 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
870 /* clear EDMA interrupt indicator, if any */
871 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
872 ipending = (DEV_IRQ << hard_port) |
873 (CRPB_DMA_DONE << hard_port);
874 if (hc_irq_cause & ipending) {
875 writelfl(hc_irq_cause & ~ipending,
876 hc_mmio + HC_IRQ_CAUSE_OFS);
879 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
881 /* clear FIS IRQ Cause */
882 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
884 mv_set_edma_ptrs(port_mmio, hpriv, pp);
/* finally: turn the engine on and update the cached software state */
886 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
887 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
889 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
893 * __mv_stop_dma - Disable eDMA engine
894 * @ap: ATA channel to manipulate
896 * Verify the local cache of the eDMA state is accurate with a
900 * Inherited from caller.
/*
 * Lock-free core: callers are expected to hold ap->host->lock (see the
 * mv_stop_dma() wrapper below).  The error path reports "Unable to stop
 * eDMA" when the engine does not idle within the poll loop; the success/
 * failure return plumbing is on elided lines.
 */
902 static int __mv_stop_dma(struct ata_port *ap)
904 void __iomem *port_mmio = mv_ap_base(ap);
905 struct mv_port_priv *pp = ap->private_data;
909 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
910 /* Disable EDMA if active. The disable bit auto clears.
912 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
913 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
915 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
918 /* now properly wait for the eDMA to stop */
/* poll up to 1000 times for the hardware EDMA_EN bit to self-clear */
919 for (i = 1000; i > 0; i--) {
920 reg = readl(port_mmio + EDMA_CMD_OFS);
921 if (!(reg & EDMA_EN))
928 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
/* Locked wrapper: take the host lock around __mv_stop_dma(). */
935 static int mv_stop_dma(struct ata_port *ap)
940 spin_lock_irqsave(&ap->host->lock, flags);
941 rc = __mv_stop_dma(ap);
942 spin_unlock_irqrestore(&ap->host->lock, flags);
948 static void mv_dump_mem(void __iomem *start, unsigned bytes)
951 for (b = 0; b < bytes; ) {
952 DPRINTK("%p: ", start + b);
953 for (w = 0; b < bytes && w < 4; w++) {
954 printk("%08x ", readl(start + b));
962 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
967 for (b = 0; b < bytes; ) {
968 DPRINTK("%02x: ", b);
969 for (w = 0; b < bytes && w < 4; w++) {
970 (void) pci_read_config_dword(pdev, b, &dw);
978 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
979 struct pci_dev *pdev)
982 void __iomem *hc_base = mv_hc_base(mmio_base,
983 port >> MV_PORT_HC_SHIFT);
984 void __iomem *port_base;
985 int start_port, num_ports, p, start_hc, num_hcs, hc;
988 start_hc = start_port = 0;
989 num_ports = 8; /* shld be benign for 4 port devs */
992 start_hc = port >> MV_PORT_HC_SHIFT;
994 num_ports = num_hcs = 1;
996 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
997 num_ports > 1 ? num_ports - 1 : start_port);
1000 DPRINTK("PCI config space regs:\n");
1001 mv_dump_pci_cfg(pdev, 0x68);
1003 DPRINTK("PCI regs:\n");
1004 mv_dump_mem(mmio_base+0xc00, 0x3c);
1005 mv_dump_mem(mmio_base+0xd00, 0x34);
1006 mv_dump_mem(mmio_base+0xf00, 0x4);
1007 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1008 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1009 hc_base = mv_hc_base(mmio_base, hc);
1010 DPRINTK("HC regs (HC %i):\n", hc);
1011 mv_dump_mem(hc_base, 0x1c);
1013 for (p = start_port; p < start_port + num_ports; p++) {
1014 port_base = mv_port_base(mmio_base, p);
1015 DPRINTK("EDMA regs (port %i):\n", p);
1016 mv_dump_mem(port_base, 0x54);
1017 DPRINTK("SATA regs (port %i):\n", p);
1018 mv_dump_mem(port_base+0x300, 0x60);
/*
 * Translate an SCR register index into a per-port MMIO offset: SStatus/
 * SError/SControl live contiguously from SATA_STATUS_OFS, while SActive
 * sits apart at SATA_ACTIVE_OFS.  The case labels and the invalid-register
 * return (0xffffffffU, judging by the callers below) are on elided lines.
 */
1023 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1027 switch (sc_reg_in) {
1031 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1034 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
/*
 * Read SCR register @sc_reg_in into *@val.  Offset 0xffffffffU marks an
 * unsupported register; the else/return path is on elided lines.
 */
1043 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1045 unsigned int ofs = mv_scr_offset(sc_reg_in);
1047 if (ofs != 0xffffffffU) {
1048 *val = readl(mv_ap_base(ap) + ofs);
/*
 * Write @val to SCR register @sc_reg_in, using writelfl() so the posted
 * write is flushed.  Offset 0xffffffffU marks an unsupported register;
 * the else/return path is on elided lines.
 */
1054 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1056 unsigned int ofs = mv_scr_offset(sc_reg_in);
1058 if (ofs != 0xffffffffU) {
1059 writelfl(val, mv_ap_base(ap) + ofs);
/*
 * Gen-II device-configuration quirk: the CRQB cannot carry hob_nsect for
 * NCQ commands, so clamp max_sectors to ATA_MAX_SECTORS on NCQ-capable
 * devices to keep transfer sizes representable without hob_nsect.
 */
1065 static void mv6_dev_config(struct ata_device *adev)
1068 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1069 * See mv_qc_prep() for more info.
1071 if (adev->flags & ATA_DFLAG_NCQ)
1072 if (adev->max_sectors > ATA_MAX_SECTORS)
1073 adev->max_sectors = ATA_MAX_SECTORS;
/*
 * mv_edma_cfg - compute and write the EDMA configuration register.
 *
 * Starts from the common queue-depth field, adds per-generation burst/
 * cache/completion options (Gen I vs. Gen II vs. Gen IIE), then sets or
 * clears the NCQ-enable bit and mirrors it into pp->pp_flags so the
 * driver's cached state matches the hardware.  The want_ncq conditional
 * wrapping the NCQ lines is on elided lines.
 */
1076 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1077 void __iomem *port_mmio, int want_ncq)
1081 /* set up non-NCQ EDMA configuration */
1082 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1084 if (IS_GEN_I(hpriv))
1085 cfg |= (1 << 8); /* enab config burst size mask */
1087 else if (IS_GEN_II(hpriv))
1088 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1090 else if (IS_GEN_IIE(hpriv)) {
1091 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1092 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1093 cfg |= (1 << 18); /* enab early completion */
1094 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
/* NCQ on/off, mirrored into the cached port flags */
1098 cfg |= EDMA_CFG_NCQ;
1099 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1101 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1103 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
/*
 * Return this port's CRQB, CRPB and SG-table buffers to their dma_pools.
 * The lines between the three frees (presumably NULL guards / pointer
 * resets -- verify) are elided in this excerpt.
 */
1106 static void mv_port_free_dma_mem(struct ata_port *ap)
1108 struct mv_host_priv *hpriv = ap->host->private_data;
1109 struct mv_port_priv *pp = ap->private_data;
1112 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1116 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1120 dma_pool_free(hpriv->sg_tbl_pool, pp->sg_tbl, pp->sg_tbl_dma);
1126 * mv_port_start - Port specific init/start routine.
1127 * @ap: ATA channel to manipulate
1129 * Allocate and point to DMA memory, init port private memory,
1133 * Inherited from caller.
1135 static int mv_port_start(struct ata_port *ap)
1137 struct device *dev = ap->host->dev;
1138 struct mv_host_priv *hpriv = ap->host->private_data;
1139 struct mv_port_priv *pp;
1140 void __iomem *port_mmio = mv_ap_base(ap);
1141 unsigned long flags;
1144 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1147 ap->private_data = pp;
1149 rc = ata_pad_alloc(ap, dev);
1153 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1156 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1158 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1160 goto out_port_free_dma_mem;
1161 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1163 pp->sg_tbl = dma_pool_alloc(hpriv->sg_tbl_pool, GFP_KERNEL,
1166 goto out_port_free_dma_mem;
1168 spin_lock_irqsave(&ap->host->lock, flags);
1170 mv_edma_cfg(pp, hpriv, port_mmio, 0);
1171 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1173 spin_unlock_irqrestore(&ap->host->lock, flags);
1175 /* Don't turn on EDMA here...do it before DMA commands only. Else
1176 * we'll be unable to send non-data, PIO, etc due to restricted access
1181 out_port_free_dma_mem:
1182 mv_port_free_dma_mem(ap);
1187 * mv_port_stop - Port specific cleanup/stop routine.
1188 * @ap: ATA channel to manipulate
1190 * Stop DMA, cleanup port memory.
1193 * This routine uses the host lock to protect the DMA stop.
1195 static void mv_port_stop(struct ata_port *ap)
/* the stop-DMA step described above is on an elided line; then the
 * port's DMA buffers are returned to their pools */
1198 mv_port_free_dma_mem(ap);
1202 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1203 * @qc: queued command whose SG list to source from
1205 * Populate the SG list and mark the last entry.
1208 * Inherited from caller.
1210 static void mv_fill_sg(struct ata_queued_cmd *qc)
1212 struct mv_port_priv *pp = qc->ap->private_data;
1213 struct scatterlist *sg;
1214 struct mv_sg *mv_sg, *last_sg = NULL;
1218 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1219 dma_addr_t addr = sg_dma_address(sg);
1220 u32 sg_len = sg_dma_len(sg);
/*
 * Split any segment whose /length/ would cross a 64KB boundary:
 * the ePRD length field is only 16 bits (cf. the MV_DMA_BOUNDARY
 * comment earlier in the file).  The inner loop consuming the
 * remainder is on elided lines.
 */
1223 u32 offset = addr & 0xffff;
1226 if ((offset + sg_len > 0x10000))
1227 len = 0x10000 - offset;
1229 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1230 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1231 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
/* mark the final ePRD so the EDMA engine knows where the table ends */
1241 if (likely(last_sg))
1242 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
/*
 * Pack one ATA shadow-register write into a 16-bit little-endian CRQB
 * command word: the data byte, the register address, the CRQB_CMD_CS
 * control code, and the LAST flag on the final word of the request.
 */
1245 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1247 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1248 (last ? CRQB_CMD_LAST : 0);
1249 *cmdw = cpu_to_le16(tmp);
1253 * mv_qc_prep - Host specific command preparation.
1254 * @qc: queued command to prepare
1256 * This routine simply redirects to the general purpose routine
1257 * if command is not DMA. Else, it handles prep of the CRQB
1258 * (command request block), does some sanity checking, and calls
1259 * the SG load routine.
1262 * Inherited from caller.
1264 static void mv_qc_prep(struct ata_queued_cmd *qc)
1266 struct ata_port *ap = qc->ap;
1267 struct mv_port_priv *pp = ap->private_data;
1269 struct ata_taskfile *tf;
/* Non-DMA protocols are handled by the generic libata path. */
1273 if (qc->tf.protocol != ATA_PROT_DMA)
1276 /* Fill in command request block
1278 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1279 flags |= CRQB_FLAG_READ;
1280 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1281 flags |= qc->tag << CRQB_TAG_SHIFT;
1283 /* get current queue index from software */
1284 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
/* Point this CRQB slot at the command's SG table (64-bit DMA address
 * split across two little-endian 32-bit fields). */
1286 pp->crqb[in_index].sg_addr =
1287 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1288 pp->crqb[in_index].sg_addr_hi =
1289 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1290 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1292 cw = &pp->crqb[in_index].ata_cmd[0];
1295 /* Sadly, the CRQB cannot accomodate all registers--there are
1296 * only 11 bytes...so we must pick and choose required
1297 * registers based on the command. So, we drop feature and
1298 * hob_feature for [RW] DMA commands, but they are needed for
1299 * NCQ. NCQ will drop hob_nsect.
1301 switch (tf->command) {
1303 case ATA_CMD_READ_EXT:
1305 case ATA_CMD_WRITE_EXT:
1306 case ATA_CMD_WRITE_FUA_EXT:
1307 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1309 #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1310 case ATA_CMD_FPDMA_READ:
1311 case ATA_CMD_FPDMA_WRITE:
1312 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1313 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1315 #endif /* FIXME: remove this line when NCQ added */
1317 /* The only other commands EDMA supports in non-queued and
1318 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1319 * of which are defined/used by Linux. If we get here, this
1320 * driver needs work.
1322 * FIXME: modify libata to give qc_prep a return value and
1323 * return error here.
1325 BUG_ON(tf->command);
/* Pack the common taskfile registers; the command byte must be the
 * final halfword of the CRQB (hence last == 1). */
1328 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1329 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1330 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1331 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1332 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1333 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1334 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1335 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1336 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1338 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1344 * mv_qc_prep_iie - Host specific command preparation.
1345 * @qc: queued command to prepare
1347 * This routine simply redirects to the general purpose routine
1348 * if command is not DMA. Else, it handles prep of the CRQB
1349 * (command request block), does some sanity checking, and calls
1350 * the SG load routine.
1353 * Inherited from caller.
1355 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1357 struct ata_port *ap = qc->ap;
1358 struct mv_port_priv *pp = ap->private_data;
1359 struct mv_crqb_iie *crqb;
1360 struct ata_taskfile *tf;
1364 if (qc->tf.protocol != ATA_PROT_DMA)
1367 /* Fill in Gen IIE command request block
1369 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1370 flags |= CRQB_FLAG_READ;
1372 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1373 flags |= qc->tag << CRQB_TAG_SHIFT;
1374 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1376 /* get current queue index from software */
1377 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1379 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1380 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1381 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1382 crqb->flags = cpu_to_le32(flags);
/* Gen IIE CRQBs carry the whole taskfile in four packed 32-bit words
 * rather than the per-register halfwords used by mv_qc_prep(). */
1385 crqb->ata_cmd[0] = cpu_to_le32(
1386 (tf->command << 16) |
1389 crqb->ata_cmd[1] = cpu_to_le32(
1395 crqb->ata_cmd[2] = cpu_to_le32(
1396 (tf->hob_lbal << 0) |
1397 (tf->hob_lbam << 8) |
1398 (tf->hob_lbah << 16) |
1399 (tf->hob_feature << 24)
1401 crqb->ata_cmd[3] = cpu_to_le32(
1403 (tf->hob_nsect << 8)
1406 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1412 * mv_qc_issue - Initiate a command to the host
1413 * @qc: queued command to start
1415 * This routine simply redirects to the general purpose routine
1416 * if command is not DMA. Else, it sanity checks our local
1417 * caches of the request producer/consumer indices then enables
1418 * DMA and bumps the request producer index.
1421 * Inherited from caller.
1423 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1425 struct ata_port *ap = qc->ap;
1426 void __iomem *port_mmio = mv_ap_base(ap);
1427 struct mv_port_priv *pp = ap->private_data;
1430 if (qc->tf.protocol != ATA_PROT_DMA) {
1431 /* We're about to send a non-EDMA capable command to the
1432 * port. Turn off EDMA so there won't be problems accessing
1433 * shadow block, etc registers.
1436 return ata_qc_issue_prot(qc);
1439 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1441 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1443 /* until we do queuing, the queue should be empty at this point */
1444 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1445 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
/* Recompute the producer index in register format (shifted into the
 * pointer field) after advancing the software index. */
1449 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1451 /* and write the request in pointer to kick the EDMA to life */
1452 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1453 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1459 * mv_err_intr - Handle error interrupts on the port
1460 * @ap: ATA channel to manipulate
1461 * @reset_allowed: bool: 0 == don't trigger from reset here
1463 * In most cases, just clear the interrupt and move on. However,
1464 * some cases require an eDMA reset, which is done right before
1465 * the COMRESET in mv_phy_reset(). The SERR case requires a
1466 * clear of pending errors in the SATA SERROR register. Finally,
1467 * if the port disabled DMA, update our cached copy to match.
1470 * Inherited from caller.
1472 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1474 void __iomem *port_mmio = mv_ap_base(ap);
1475 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1476 struct mv_port_priv *pp = ap->private_data;
1477 struct mv_host_priv *hpriv = ap->host->private_data;
1478 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1479 unsigned int action = 0, err_mask = 0;
1480 struct ata_eh_info *ehi = &ap->link.eh_info;
1482 ata_ehi_clear_desc(ehi);
1484 if (!edma_enabled) {
1485 /* just a guess: do we need to do this? should we
1486 * expand this, and do it in all cases?
1488 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1489 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1492 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1494 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1497 * all generations share these EDMA error cause bits
1500 if (edma_err_cause & EDMA_ERR_DEV)
1501 err_mask |= AC_ERR_DEV;
1502 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1503 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1504 EDMA_ERR_INTRL_PAR)) {
1505 err_mask |= AC_ERR_ATA_BUS;
1506 action |= ATA_EH_HARDRESET;
1507 ata_ehi_push_desc(ehi, "parity error");
1509 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1510 ata_ehi_hotplugged(ehi);
1511 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1512 "dev disconnect" : "dev connect");
1513 action |= ATA_EH_HARDRESET;
/* Gen I parts use a different freeze mask and self-disable bit from
 * Gen II/IIE; in either case record that EDMA turned itself off. */
1516 if (IS_GEN_I(hpriv)) {
1517 eh_freeze_mask = EDMA_EH_FREEZE_5;
1519 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1520 struct mv_port_priv *pp = ap->private_data;
1521 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1522 ata_ehi_push_desc(ehi, "EDMA self-disable");
1525 eh_freeze_mask = EDMA_EH_FREEZE;
1527 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1528 struct mv_port_priv *pp = ap->private_data;
1529 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1530 ata_ehi_push_desc(ehi, "EDMA self-disable");
1533 if (edma_err_cause & EDMA_ERR_SERR) {
1534 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1535 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1536 err_mask = AC_ERR_ATA_BUS;
1537 action |= ATA_EH_HARDRESET;
1541 /* Clear EDMA now that SERR cleanup done */
1542 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1545 err_mask = AC_ERR_OTHER;
1546 action |= ATA_EH_HARDRESET;
1549 ehi->serror |= serr;
1550 ehi->action |= action;
/* Attribute the error to the active qc when there is one, otherwise
 * record it in the port-wide EH info. */
1553 qc->err_mask |= err_mask;
1555 ehi->err_mask |= err_mask;
1557 if (edma_err_cause & eh_freeze_mask)
1558 ata_port_freeze(ap);
/* mv_intr_pio - handle a PIO-protocol completion interrupt on @ap:
 * read ATA status, find the active command, and complete it. */
1563 static void mv_intr_pio(struct ata_port *ap)
1565 struct ata_queued_cmd *qc;
1568 /* ignore spurious intr if drive still BUSY */
1569 ata_status = readb(ap->ioaddr.status_addr);
1570 if (unlikely(ata_status & ATA_BUSY))
1573 /* get active ATA command */
1574 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1575 if (unlikely(!qc)) /* no active tag */
1577 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1580 /* and finally, complete the ATA command */
1581 qc->err_mask |= ac_err_mask(ata_status);
1582 ata_qc_complete(qc);
/* mv_intr_edma - drain the EDMA response queue for @ap, completing each
 * finished command, then write the consumer pointer back to hardware. */
1585 static void mv_intr_edma(struct ata_port *ap)
1587 void __iomem *port_mmio = mv_ap_base(ap);
1588 struct mv_host_priv *hpriv = ap->host->private_data;
1589 struct mv_port_priv *pp = ap->private_data;
1590 struct ata_queued_cmd *qc;
1591 u32 out_index, in_index;
1592 bool work_done = false;
1594 /* get h/w response queue pointer */
1595 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1596 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1602 /* get s/w response queue last-read pointer, and compare */
1603 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1604 if (in_index == out_index)
1607 /* 50xx: get active ATA command */
1608 if (IS_GEN_I(hpriv))
1609 tag = ap->link.active_tag;
1611 /* Gen II/IIE: get active ATA command via tag, to enable
1612 * support for queueing. this works transparently for
1613 * queued and non-queued modes.
1616 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1618 qc = ata_qc_from_tag(ap, tag);
1620 /* For non-NCQ mode, the lower 8 bits of status
1621 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1622 * which should be zero if all went well.
1624 status = le16_to_cpu(pp->crpb[out_index].flags);
1625 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1626 mv_err_intr(ap, qc);
1630 /* and finally, complete the ATA command */
1633 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1634 ata_qc_complete(qc);
1637 /* advance software response queue pointer, to
1638 * indicate (after the loop completes) to hardware
1639 * that we have consumed a response queue entry.
/* Hand the updated out-pointer back to the EDMA engine. */
1646 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1647 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1648 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1652 * mv_host_intr - Handle all interrupts on the given host controller
1653 * @host: host specific structure
1654 * @relevant: port error bits relevant to this host controller
1655 * @hc: which host controller we're to look at
1657 * Read then write clear the HC interrupt status then walk each
1658 * port connected to the HC and see if it needs servicing. Port
1659 * success ints are reported in the HC interrupt status reg, the
1660 * port error ints are reported in the higher level main
1661 * interrupt status register and thus are passed in via the
1662 * 'relevant' argument.
1665 * Inherited from caller.
1667 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1669 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1670 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1677 port0 = MV_PORTS_PER_HC;
1679 /* we'll need the HC success int register in most cases */
1680 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1684 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1686 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1687 hc, relevant, hc_irq_cause);
1689 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1690 struct ata_port *ap = host->ports[port];
1691 struct mv_port_priv *pp = ap->private_data;
1692 int have_err_bits, hard_port, shift;
1694 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1697 shift = port << 1; /* (port * 2) */
1698 if (port >= MV_PORTS_PER_HC) {
1699 shift++; /* skip bit 8 in the HC Main IRQ reg */
1701 have_err_bits = ((PORT0_ERR << shift) & relevant);
1703 if (unlikely(have_err_bits)) {
1704 struct ata_queued_cmd *qc;
1706 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1707 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1710 mv_err_intr(ap, qc);
1714 hard_port = mv_hardport_from_port(port); /* range 0..3 */
/* Completion events: EDMA response-done when EDMA is enabled, else a
 * plain device interrupt handled through the PIO path. */
1716 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1717 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1720 if ((DEV_IRQ << hard_port) & hc_irq_cause)
/* mv_pci_error - handle a host-level PCI error interrupt: log and dump
 * state, clear the cause register, then mark the error on every online
 * port and freeze it for error handling. */
1727 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1729 struct mv_host_priv *hpriv = host->private_data;
1730 struct ata_port *ap;
1731 struct ata_queued_cmd *qc;
1732 struct ata_eh_info *ehi;
1733 unsigned int i, err_mask, printed = 0;
1736 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1738 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1741 DPRINTK("All regs @ PCI error\n");
1742 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1744 writelfl(0, mmio + hpriv->irq_cause_ofs);
1746 for (i = 0; i < host->n_ports; i++) {
1747 ap = host->ports[i];
1748 if (!ata_link_offline(&ap->link)) {
1749 ehi = &ap->link.eh_info;
1750 ata_ehi_clear_desc(ehi);
1752 ata_ehi_push_desc(ehi,
1753 "PCI err cause 0x%08x", err_cause);
1754 err_mask = AC_ERR_HOST_BUS;
1755 ehi->action = ATA_EH_HARDRESET;
1756 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1758 qc->err_mask |= err_mask;
1760 ehi->err_mask |= err_mask;
1762 ata_port_freeze(ap);
1768 * mv_interrupt - Main interrupt event handler
1770 * @dev_instance: private data; in this case the host structure
1772 * Read the read only register to determine if any host
1773 * controllers have pending interrupts. If so, call lower level
1774 * routine to handle. Also check for PCI errors which are only
1778 * This routine holds the host lock while processing pending
1781 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1783 struct ata_host *host = dev_instance;
1784 unsigned int hc, handled = 0, n_hcs;
1785 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1786 u32 irq_stat, irq_mask;
1788 spin_lock(&host->lock);
1789 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1790 irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
1792 /* check the cases where we either have nothing pending or have read
1793 * a bogus register value which can indicate HW removal or PCI fault
1795 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1798 n_hcs = mv_get_hc_count(host->ports[0]->flags);
/* PCI errors preempt all per-HC handling. */
1800 if (unlikely(irq_stat & PCI_ERR)) {
1801 mv_pci_error(host, mmio);
1803 goto out_unlock; /* skip all other HC irq handling */
/* Dispatch each host controller that has relevant bits pending. */
1806 for (hc = 0; hc < n_hcs; hc++) {
1807 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1809 mv_host_intr(host, relevant, hc);
1815 spin_unlock(&host->lock);
1817 return IRQ_RETVAL(handled);
/* mv5_phy_base - return the PHY register base for @port on 50xx parts;
 * per-port PHY blocks sit at 0x100-byte intervals inside the HC. */
1820 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1822 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1823 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1825 return hc_mmio + ofs;
/* mv5_scr_offset - map an SCR register index to its 50xx MMIO offset.
 * NOTE(review): callers treat 0xffffffffU as "unsupported register" --
 * the default case is not visible in this extract; confirm. */
1828 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1832 switch (sc_reg_in) {
1836 ofs = sc_reg_in * sizeof(u32);
/* mv5_scr_read - read SATA SCR register @sc_reg_in for a 50xx port
 * through its PHY register block. */
1845 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1847 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1848 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1849 unsigned int ofs = mv5_scr_offset(sc_reg_in);
/* 0xffffffffU from mv5_scr_offset() means the register is unsupported. */
1851 if (ofs != 0xffffffffU) {
1852 *val = readl(addr + ofs);
/* mv5_scr_write - write SATA SCR register @sc_reg_in for a 50xx port
 * through its PHY register block (flushing write). */
1858 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1860 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1861 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1862 unsigned int ofs = mv5_scr_offset(sc_reg_in);
/* 0xffffffffU from mv5_scr_offset() means the register is unsupported. */
1864 if (ofs != 0xffffffffU) {
1865 writelfl(val, addr + ofs);
/* mv5_reset_bus - PCI bus reset for 50xx parts; early 5080 rev-0 chips
 * additionally get an expansion-ROM BAR control tweak first. */
1871 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1875 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1878 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1880 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1883 mv_reset_pci_bus(pdev, mmio);
/* mv5_reset_flash - restore the 50xx flash-control register to its
 * default value. */
1886 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1888 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
/* mv5_read_preamp - cache the PHY pre-emphasis and amplitude settings
 * for port @idx so mv5_phy_errata() can restore them after reset. */
1891 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1894 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1897 tmp = readl(phy_mmio + MV5_PHY_MODE);
1899 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1900 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
/* mv5_enable_leds - GPIO and expansion-ROM BAR setup for LED signalling
 * on 50xx parts. */
1903 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1907 writel(0, mmio + MV_GPIO_PORT_CTL);
1909 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1911 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1913 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
/* mv5_phy_errata - apply 50xx PHY workarounds for @port, then restore
 * the pre-emphasis/amplitude values saved by mv5_read_preamp(). */
1916 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1919 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1920 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
/* APM/squelch fixups are only needed on 50XXB0 silicon. */
1922 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1925 tmp = readl(phy_mmio + MV5_LT_MODE);
1927 writel(tmp, phy_mmio + MV5_LT_MODE);
1929 tmp = readl(phy_mmio + MV5_PHY_CTL);
1932 writel(tmp, phy_mmio + MV5_PHY_CTL);
/* Restore the saved signal parameters into PHY_MODE. */
1935 tmp = readl(phy_mmio + MV5_PHY_MODE);
1937 tmp |= hpriv->signal[port].pre;
1938 tmp |= hpriv->signal[port].amps;
1939 writel(tmp, phy_mmio + MV5_PHY_MODE);
1944 #define ZERO(reg) writel(0, port_mmio + (reg))
/* mv5_reset_hc_port - disable EDMA, reset the channel, then bring the
 * port's EDMA registers back to known defaults. */
1945 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1948 void __iomem *port_mmio = mv_port_base(mmio, port);
1950 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1952 mv_channel_reset(hpriv, mmio, port);
1954 ZERO(0x028); /* command */
1955 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1956 ZERO(0x004); /* timer */
1957 ZERO(0x008); /* irq err cause */
1958 ZERO(0x00c); /* irq err mask */
1959 ZERO(0x010); /* rq bah */
1960 ZERO(0x014); /* rq inp */
1961 ZERO(0x018); /* rq outp */
1962 ZERO(0x01c); /* respq bah */
1963 ZERO(0x024); /* respq outp */
1964 ZERO(0x020); /* respq inp */
1965 ZERO(0x02c); /* test control */
1966 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1970 #define ZERO(reg) writel(0, hc_mmio + (reg))
/* mv5_reset_one_hc - reset one host controller's shared registers. */
1971 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1974 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
/* Read-modify-write of HC register 0x20 -- purpose of the modified
 * bits is not visible in this extract. */
1982 tmp = readl(hc_mmio + 0x20);
1985 writel(tmp, hc_mmio + 0x20);
/* mv5_reset_hc - reset every port on every host controller, then reset
 * each host controller itself. */
1989 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1992 unsigned int hc, port;
1994 for (hc = 0; hc < n_hc; hc++) {
1995 for (port = 0; port < MV_PORTS_PER_HC; port++)
1996 mv5_reset_hc_port(hpriv, mmio,
1997 (hc * MV_PORTS_PER_HC) + port);
1999 mv5_reset_one_hc(hpriv, mmio, hc);
2006 #define ZERO(reg) writel(0, mmio + (reg))
/* mv_reset_pci_bus - clear/initialize the chip-level PCI interface:
 * PCI mode, timers, IRQ masks, and the latched PCI error registers. */
2007 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
2009 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2010 struct mv_host_priv *hpriv = host->private_data;
2013 tmp = readl(mmio + MV_PCI_MODE);
2015 writel(tmp, mmio + MV_PCI_MODE);
2017 ZERO(MV_PCI_DISC_TIMER);
2018 ZERO(MV_PCI_MSI_TRIGGER);
2019 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2020 ZERO(HC_MAIN_IRQ_MASK_OFS);
2021 ZERO(MV_PCI_SERR_MASK);
2022 ZERO(hpriv->irq_cause_ofs);
2023 ZERO(hpriv->irq_mask_ofs);
2024 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2025 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2026 ZERO(MV_PCI_ERR_ATTRIBUTE);
2027 ZERO(MV_PCI_ERR_COMMAND);
/* mv6_reset_flash - 50xx flash reset plus 60xx-specific GPIO bits. */
2031 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2035 mv5_reset_flash(hpriv, mmio);
2037 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2039 tmp |= (1 << 5) | (1 << 6);
2040 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2044 * mv6_reset_hc - Perform the 6xxx global soft reset
2045 * @mmio: base address of the HBA
2047 * This routine only applies to 6xxx parts.
2050 * Inherited from caller.
2052 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2055 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2059 /* Following procedure defined in PCI "main command and status
/* Step 1: stop the PCI master and poll (bounded) until it drains. */
2063 writel(t | STOP_PCI_MASTER, reg);
2065 for (i = 0; i < 1000; i++) {
2068 if (PCI_MASTER_EMPTY & t)
2071 if (!(PCI_MASTER_EMPTY & t)) {
2072 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
/* Step 2: assert global soft reset and poll until it latches. */
2080 writel(t | GLOB_SFT_RST, reg);
2083 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2085 if (!(GLOB_SFT_RST & t)) {
2086 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2091 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2094 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2097 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2099 if (GLOB_SFT_RST & t) {
2100 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
/* mv6_read_preamp - cache the PHY amplitude/pre-emphasis settings for
 * port @idx; fall back to fixed defaults when RESET_CFG bit 0 is
 * clear. */
2107 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2110 void __iomem *port_mmio;
2113 tmp = readl(mmio + MV_RESET_CFG);
2114 if ((tmp & (1 << 0)) == 0) {
2115 hpriv->signal[idx].amps = 0x7 << 8;
2116 hpriv->signal[idx].pre = 0x1 << 5;
2120 port_mmio = mv_port_base(mmio, idx);
2121 tmp = readl(port_mmio + PHY_MODE2);
2123 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2124 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
/* mv6_enable_leds - GPIO port-control setup for LED signalling on
 * 60xx parts. */
2127 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2129 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
/* mv6_phy_errata - apply Gen II/IIE PHY-mode workarounds for @port and
 * restore the saved amplitude/pre-emphasis values. */
2132 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2135 void __iomem *port_mmio = mv_port_base(mmio, port);
2137 u32 hp_flags = hpriv->hp_flags;
2139 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2141 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2144 if (fix_phy_mode2) {
2145 m2 = readl(port_mmio + PHY_MODE2);
2148 writel(m2, port_mmio + PHY_MODE2);
2152 m2 = readl(port_mmio + PHY_MODE2);
2153 m2 &= ~((1 << 16) | (1 << 31));
2154 writel(m2, port_mmio + PHY_MODE2);
2159 /* who knows what this magic does */
2160 tmp = readl(port_mmio + PHY_MODE3);
2163 writel(tmp, port_mmio + PHY_MODE3);
2165 if (fix_phy_mode4) {
2168 m4 = readl(port_mmio + PHY_MODE4);
/* Save/restore register 0x310 around the PHY_MODE4 write on 60X1B2
 * silicon (errata workaround). */
2170 if (hp_flags & MV_HP_ERRATA_60X1B2)
2171 tmp = readl(port_mmio + 0x310);
2173 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2175 writel(m4, port_mmio + PHY_MODE4);
2177 if (hp_flags & MV_HP_ERRATA_60X1B2)
2178 writel(tmp, port_mmio + 0x310);
2181 /* Revert values of pre-emphasis and signal amps to the saved ones */
2182 m2 = readl(port_mmio + PHY_MODE2);
2184 m2 &= ~MV_M2_PREAMP_MASK;
2185 m2 |= hpriv->signal[port].amps;
2186 m2 |= hpriv->signal[port].pre;
2189 /* according to mvSata 3.6.1, some IIE values are fixed */
2190 if (IS_GEN_IIE(hpriv)) {
2195 writel(m2, port_mmio + PHY_MODE2);
/* mv_channel_reset - pulse ATA_RST in the EDMA command register to
 * reset one channel, then re-apply the per-chip PHY errata. */
2198 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2199 unsigned int port_no)
2201 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2203 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
/* Gen II: force the interface control register to the chip-spec value
 * with gen2i speed enabled before releasing reset. */
2205 if (IS_GEN_II(hpriv)) {
2206 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2207 ifctl |= (1 << 7); /* enable gen2i speed */
2208 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2209 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2212 udelay(25); /* allow reset propagation */
2214 /* Spec never mentions clearing the bit. Marvell's driver does
2215 * clear the bit, however.
2217 writelfl(0, port_mmio + EDMA_CMD_OFS);
2219 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2221 if (IS_GEN_I(hpriv))
2226 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2227 * @ap: ATA channel to manipulate
2229 * Part of this is taken from __sata_phy_reset and modified to
2230 * not sleep since this routine gets called from interrupt level.
2233 * Inherited from caller. This is coded to safe to call at
2234 * interrupt level, i.e. it does not sleep.
2236 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2237 unsigned long deadline)
2239 struct mv_port_priv *pp = ap->private_data;
2240 struct mv_host_priv *hpriv = ap->host->private_data;
2241 void __iomem *port_mmio = mv_ap_base(ap);
2245 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2249 u32 sstatus, serror, scontrol;
2251 mv_scr_read(ap, SCR_STATUS, &sstatus);
2252 mv_scr_read(ap, SCR_ERROR, &serror);
2253 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2254 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2255 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2259 /* Issue COMRESET via SControl */
2261 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2264 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
/* Poll SStatus until the DET field reports device present (3) or no
 * device (0), or the deadline expires. */
2268 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2269 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2273 } while (time_before(jiffies, deadline));
2275 /* work around errata */
2276 if (IS_GEN_II(hpriv) &&
2277 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2279 goto comreset_retry;
2283 u32 sstatus, serror, scontrol;
2285 mv_scr_read(ap, SCR_STATUS, &sstatus);
2286 mv_scr_read(ap, SCR_ERROR, &serror);
2287 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2288 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2289 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2293 if (ata_link_offline(&ap->link)) {
2294 *class = ATA_DEV_NONE;
2298 /* even after SStatus reflects that device is ready,
2299 * it seems to take a while for link to be fully
2300 * established (and thus Status no longer 0x80/0x7F),
2301 * so we poll a bit for that, here.
2305 u8 drv_stat = ata_check_status(ap);
2306 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2311 if (time_after(jiffies, deadline))
2315 /* FIXME: if we passed the deadline, the following
2316 * code probably produces an invalid result
2319 /* finally, read device signature from TF registers */
2320 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2322 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2324 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
/* mv_prereset - EH prereset hook: stop EDMA, force a hardreset on the
 * first EH pass for this port, otherwise wait for link readiness. */
2329 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2331 struct ata_port *ap = link->ap;
2332 struct mv_port_priv *pp = ap->private_data;
2333 struct ata_eh_context *ehc = &link->eh_context;
2336 rc = mv_stop_dma(ap);
2338 ehc->i.action |= ATA_EH_HARDRESET;
/* First EH invocation on this port always escalates to hardreset. */
2340 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2341 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2342 ehc->i.action |= ATA_EH_HARDRESET;
2345 /* if we're about to do hardreset, nothing more to do */
2346 if (ehc->i.action & ATA_EH_HARDRESET)
2349 if (ata_link_online(link))
2350 rc = ata_wait_ready(ap, deadline);
/* mv_hardreset - EH hardreset hook: channel reset then PHY COMRESET. */
2357 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2358 unsigned long deadline)
2360 struct ata_port *ap = link->ap;
2361 struct mv_host_priv *hpriv = ap->host->private_data;
2362 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2366 mv_channel_reset(hpriv, mmio, ap->port_no);
2368 mv_phy_reset(ap, class, deadline);
/* mv_postreset - EH postreset hook: report link status, clear SError,
 * and rewrite device control if a device was classified. */
2373 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2375 struct ata_port *ap = link->ap;
2378 /* print link status */
2379 sata_print_link_status(link);
2382 sata_scr_read(link, SCR_ERROR, &serr);
2383 sata_scr_write_flush(link, SCR_ERROR, serr);
2385 /* bail out if no device is present */
2386 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2387 DPRINTK("EXIT, no device\n");
2391 /* set up device control */
2392 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
/* mv_error_handler - run libata EH with this driver's reset hooks. */
2395 static void mv_error_handler(struct ata_port *ap)
2397 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2398 mv_hardreset, mv_postreset);
/* mv_post_int_cmd - post-internal-command hook: ensure EDMA is off so
 * shadow-register access is safe again. */
2401 static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2403 mv_stop_dma(qc->ap);
/* mv_eh_freeze - mask this port's err/done bits in the main IRQ mask
 * register so no further interrupts are delivered while frozen. */
2406 static void mv_eh_freeze(struct ata_port *ap)
2408 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2409 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2413 /* FIXME: handle coalescing completion events properly */
/* Each port owns two adjacent bits (err, done) in the main mask. */
2415 shift = ap->port_no * 2;
2419 mask = 0x3 << shift;
2421 /* disable assertion of portN err, done events */
2422 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2423 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
/* mv_eh_thaw - clear stale port/HC interrupt causes and re-enable this
 * port's err/done bits in the main IRQ mask register. */
2426 static void mv_eh_thaw(struct ata_port *ap)
2428 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2429 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2430 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2431 void __iomem *port_mmio = mv_ap_base(ap);
2432 u32 tmp, mask, hc_irq_cause;
2433 unsigned int shift, hc_port_no = ap->port_no;
2435 /* FIXME: handle coalescing completion events properly */
/* Each port owns two adjacent bits (err, done) in the main mask. */
2437 shift = ap->port_no * 2;
2443 mask = 0x3 << shift;
2445 /* clear EDMA errors on this port */
2446 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2448 /* clear pending irq events */
2449 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2450 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2451 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2452 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2454 /* enable assertion of portN err, done events */
2455 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2456 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2460 * mv_port_init - Perform some early initialization on a single port.
2461 * @port: libata data structure storing shadow register addresses
2462 * @port_mmio: base address of the port
2464 * Initialize shadow register mmio addresses, clear outstanding
2465 * interrupts on the port, and unmask interrupts for the future
2466 * start of the port.
2469 * Inherited from caller.
2471 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2473 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2476 /* PIO related setup
/* Shadow taskfile registers are exposed as 32-bit slots in the SHD
 * block, indexed by the standard ATA_REG_* numbers. */
2478 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2480 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2481 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2482 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2483 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2484 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2485 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2487 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2488 /* special case: control/altstatus doesn't have ATA_REG_ address */
2489 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2492 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2494 /* Clear any currently outstanding port interrupt conditions */
2495 serr_ofs = mv_scr_offset(SCR_ERROR);
2496 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2497 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2499 /* unmask all non-transient EDMA error interrupts */
2500 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2502 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2503 readl(port_mmio + EDMA_CFG_OFS),
2504 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2505 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
/*
 * mv_chip_id - select per-generation ops and errata workaround flags
 * @host: ATA host whose private_data (struct mv_host_priv) is filled in
 * @board_idx: board table index identifying the controller variant
 *
 * Chooses the mv5xxx_ops or mv6xxx_ops vector, sets the MV_HP_GEN_*
 * generation flag, and ORs MV_HP_ERRATA_* bits into hp_flags based on
 * the PCI revision ID.  Finally records PCI vs PCIe interrupt
 * cause/mask register offsets depending on MV_HP_PCIE.
 *
 * NOTE(review): the switch case labels, closing braces and return
 * statements are not visible in this excerpt; the groupings annotated
 * below are inferred from the flags being set — confirm against the
 * full source.
 */
2508 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2510 struct pci_dev *pdev = to_pci_dev(host->dev);
2511 struct mv_host_priv *hpriv = host->private_data;
2512 u32 hp_flags = hpriv->hp_flags;
2514 switch (board_idx) {
	/* Gen-I (50xx) family, first variant: 5xxx ops vector */
2516 hpriv->ops = &mv5xxx_ops;
2517 hp_flags |= MV_HP_GEN_I;
	/* pick errata workarounds by PCI revision ID */
2519 switch (pdev->revision) {
2521 hp_flags |= MV_HP_ERRATA_50XXB0;
2524 hp_flags |= MV_HP_ERRATA_50XXB2;
	/* unknown revision: warn and apply the latest known workaround */
2527 dev_printk(KERN_WARNING, &pdev->dev,
2528 "Applying 50XXB2 workarounds to unknown rev\n");
2529 hp_flags |= MV_HP_ERRATA_50XXB2;
	/* Gen-I (50xx) family, second variant: same ops, same flag scheme */
2536 hpriv->ops = &mv5xxx_ops;
2537 hp_flags |= MV_HP_GEN_I;
2539 switch (pdev->revision) {
2541 hp_flags |= MV_HP_ERRATA_50XXB0;
2544 hp_flags |= MV_HP_ERRATA_50XXB2;
2547 dev_printk(KERN_WARNING, &pdev->dev,
2548 "Applying B2 workarounds to unknown rev\n");
2549 hp_flags |= MV_HP_ERRATA_50XXB2;
	/* Gen-II (60x1) family: 6xxx ops vector */
2556 hpriv->ops = &mv6xxx_ops;
2557 hp_flags |= MV_HP_GEN_II;
2559 switch (pdev->revision) {
2561 hp_flags |= MV_HP_ERRATA_60X1B2;
2564 hp_flags |= MV_HP_ERRATA_60X1C0;
2567 dev_printk(KERN_WARNING, &pdev->dev,
2568 "Applying B2 workarounds to unknown rev\n");
2569 hp_flags |= MV_HP_ERRATA_60X1B2;
	/* PCIe variant: remember so the PCIe IRQ register offsets are used */
2575 hp_flags |= MV_HP_PCIE;
2576 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2577 (pdev->device == 0x2300 || pdev->device == 0x2310))
2580 * Highpoint RocketRAID PCIe 23xx series cards:
2582 * Unconfigured drives are treated as "Legacy"
2583 * by the BIOS, and it overwrites sector 8 with
2584 * a "Lgcy" metadata block prior to Linux boot.
2586 * Configured drives (RAID or JBOD) leave sector 8
2587 * alone, but instead overwrite a high numbered
2588 * sector for the RAID metadata. This sector can
2589 * be determined exactly, by truncating the physical
2590 * drive capacity to a nice even GB value.
2592 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2594 * Warn the user, lest they think we're just buggy.
2596 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2597 " BIOS CORRUPTS DATA on all attached drives,"
2598 " regardless of if/how they are configured."
2600 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2601 " use sectors 8-9 on \"Legacy\" drives,"
2602 " and avoid the final two gigabytes on"
2603 " all RocketRAID BIOS initialized drives.\n");
	/* Gen-IIE family: 6xxx ops vector with the enhanced-EDMA flag */
2606 hpriv->ops = &mv6xxx_ops;
2607 hp_flags |= MV_HP_GEN_IIE;
2609 switch (pdev->revision) {
2611 hp_flags |= MV_HP_ERRATA_XX42A0;
2614 hp_flags |= MV_HP_ERRATA_60X1C0;
2617 dev_printk(KERN_WARNING, &pdev->dev,
2618 "Applying 60X1C0 workarounds to unknown rev\n");
2619 hp_flags |= MV_HP_ERRATA_60X1C0;
	/* unrecognized board index is a driver-table bug, not a HW problem */
2625 dev_printk(KERN_ERR, &pdev->dev,
2626 "BUG: invalid board index %u\n", board_idx);
	/* commit accumulated flags, then pick IRQ register offsets by bus type */
2630 hpriv->hp_flags = hp_flags;
2631 if (hp_flags & MV_HP_PCIE) {
2632 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2633 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2634 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2636 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2637 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2638 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2645 * mv_init_host - Perform some early initialization of the host.
2646 * @host: ATA host to initialize
2647 * @board_idx: controller index
2649 * If possible, do an early global reset of the host. Then do
2650 * our port init and clear/unmask all/relevant host interrupts.
2653 * Inherited from caller.
2655 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2657 int rc = 0, n_hc, port, hc;
2658 struct pci_dev *pdev = to_pci_dev(host->dev);
2659 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2660 struct mv_host_priv *hpriv = host->private_data;
	/* mask everything first; ports are unmasked again at the end */
2662 /* global interrupt mask */
2663 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
	/* identify generation/errata before touching per-port registers;
	 * NOTE(review): the "if (rc) goto ..." error checks after these
	 * calls are not visible in this excerpt — confirm in full source */
2665 rc = mv_chip_id(host, board_idx);
2669 n_hc = mv_get_hc_count(host->ports[0]->flags);
	/* capture pre-reset PHY state for each port */
2671 for (port = 0; port < host->n_ports; port++)
2672 hpriv->ops->read_preamp(hpriv, port, mmio);
2674 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2678 hpriv->ops->reset_flash(hpriv, mmio);
2679 hpriv->ops->reset_bus(pdev, mmio);
2680 hpriv->ops->enable_leds(hpriv, mmio);
2682 for (port = 0; port < host->n_ports; port++) {
2683 if (IS_GEN_II(hpriv)) {
2684 void __iomem *port_mmio = mv_port_base(mmio, port);
	/* Gen-II only: force the interface-control register to the
	 * chip-spec value with the gen2i speed bit set */
2686 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2687 ifctl |= (1 << 7); /* enable gen2i speed */
2688 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2689 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2692 hpriv->ops->phy_errata(hpriv, mmio, port);
	/* wire up each ata_port's taskfile shadow registers and describe
	 * its mmio window for dmesg */
2695 for (port = 0; port < host->n_ports; port++) {
2696 struct ata_port *ap = host->ports[port];
2697 void __iomem *port_mmio = mv_port_base(mmio, port);
2698 unsigned int offset = port_mmio - mmio;
2700 mv_port_init(&ap->ioaddr, port_mmio);
2702 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2703 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2706 for (hc = 0; hc < n_hc; hc++) {
2707 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2709 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2710 "(before clear)=0x%08x\n", hc,
2711 readl(hc_mmio + HC_CFG_OFS),
2712 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2714 /* Clear any currently outstanding hc interrupt conditions */
2715 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2718 /* Clear any currently outstanding host interrupt conditions */
2719 writelfl(0, mmio + hpriv->irq_cause_ofs);
2721 /* and unmask interrupt generation for host regs */
2722 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
	/* Gen-I chips take a different main-IRQ mask than Gen-II/IIE */
2724 if (IS_GEN_I(hpriv))
2725 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2727 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2729 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2730 "PCI int cause/mask=0x%08x/0x%08x\n",
2731 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2732 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2733 readl(mmio + hpriv->irq_cause_ofs),
2734 readl(mmio + hpriv->irq_mask_ofs));
2741 * mv_print_info - Dump key info to kernel log for perusal.
2742 * @host: ATA host to print info about
2744 * FIXME: complete this.
2747 * Inherited from caller.
2749 static void mv_print_info(struct ata_host *host)
2751 struct pci_dev *pdev = to_pci_dev(host->dev);
2752 struct mv_host_priv *hpriv = host->private_data;
	/* scc_s: textual SCSI command-set class; gen: chip generation string.
	 * NOTE(review): the declaration of 'scc' (presumably u8) and the
	 * string assignments for scc_s/gen are not visible in this excerpt */
2754 const char *scc_s, *gen;
2756 /* Use this to determine the HW stepping of the chip so we know
2757 * what errata to workaround
2759 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2762 else if (scc == 0x01)
	/* map the generation flag to a human-readable string */
2767 if (IS_GEN_I(hpriv))
2769 else if (IS_GEN_II(hpriv))
2771 else if (IS_GEN_IIE(hpriv))
	/* one-line summary of the controller's capabilities and IRQ mode */
2776 dev_printk(KERN_INFO, &pdev->dev,
2777 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2778 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2779 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
/*
 * mv_create_dma_pools - allocate the managed DMA pools the driver needs
 * @hpriv: host private data to receive the pool pointers
 * @dev: device the pools are bound to (devres-managed lifetime)
 *
 * Creates pools for the command request queue (CRQB), command response
 * queue (CRPB) and scatter/gather tables.  NOTE(review): the error
 * returns after each NULL check are not visible in this excerpt —
 * presumably -ENOMEM; confirm in full source.
 */
2782 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2784 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2786 if (!hpriv->crqb_pool)
2789 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2791 if (!hpriv->crpb_pool)
2794 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2796 if (!hpriv->sg_tbl_pool)
2803 * mv_init_one - handle a positive probe of a Marvell host
2804 * @pdev: PCI device found
2805 * @ent: PCI device ID entry for the matched host
2808 * Inherited from caller.
2810 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2812 static int printed_version;
2813 unsigned int board_idx = (unsigned int)ent->driver_data;
2814 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2815 struct ata_host *host;
2816 struct mv_host_priv *hpriv;
	/* print the driver version once, on the first probed device */
2819 if (!printed_version++)
2820 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2823 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
	/* both allocations are devres-managed: freed automatically on
	 * probe failure or device removal */
2825 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2826 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2827 if (!host || !hpriv)
2829 host->private_data = hpriv;
	/* NOTE(review): the "if (rc) return rc;" checks after the calls
	 * below are not visible in this excerpt — confirm in full source */
2831 /* acquire resources */
2832 rc = pcim_enable_device(pdev);
2836 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	/* keep the device pinned if BAR mapping fails, per libata custom */
2838 pcim_pin_device(pdev);
2841 host->iomap = pcim_iomap_table(pdev);
2843 rc = pci_go_64(pdev);
2847 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2851 /* initialize adapter */
2852 rc = mv_init_host(host, board_idx);
	/* fall back to legacy INTx if MSI is disabled or fails to enable */
2856 /* Enable interrupts */
2857 if (msi && pci_enable_msi(pdev))
2860 mv_dump_pci_cfg(pdev, 0x68);
2861 mv_print_info(host);
2863 pci_set_master(pdev);
2864 pci_try_set_mwi(pdev);
	/* Gen-I chips get the mv5 scsi_host_template, the rest get mv6 */
2865 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2866 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2869 static int __init mv_init(void)
2871 return pci_register_driver(&mv_pci_driver);
2874 static void __exit mv_exit(void)
2876 pci_unregister_driver(&mv_pci_driver);
/* Module metadata: authorship, license, and the PCI ID table used by
 * the kernel's module loader for automatic device binding */
2879 MODULE_AUTHOR("Brett Russ");
2880 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2881 MODULE_LICENSE("GPL");
2882 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2883 MODULE_VERSION(DRV_VERSION);
/* 'msi' module parameter: read-only after load (mode 0444) */
2885 module_param(msi, int, 0444);
2886 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
/* register the module's init/exit entry points */
2888 module_init(mv_init);
2889 module_exit(mv_exit);