 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.6"
enum {
	/* BARs are enumerated in pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary.  Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
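	/* A worked check of the arithmetic above (a sketch): with
	 * MV_MAX_Q_DEPTH == 32 and MV_MAX_SG_CT == 176,
	 *
	 *	MV_CRQB_Q_SZ        = 32 * 32  = 1024B (1KB)
	 *	MV_CRPB_Q_SZ        =  8 * 32  =  256B
	 *	MV_SG_TBL_SZ        = 16 * 176 = 2816B
	 *	MV_PORT_PRIV_DMA_SZ = 1024 + 256 + 2816 = 4096B == 4KB
	 *
	 * so one 4KB chunk of coherent DMA memory per port covers all three
	 * tables; see mv_port_start() below.
	 */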
	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   ATA_FLAG_NO_ATAPI),
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};

#define IS_50XX(hpriv)	((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv)	(((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv)		IS_50XX(hpriv)
#define IS_GEN_II(hpriv)	IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	/* Our DMA boundary is determined by an ePRD being unable to handle
	 * anything larger than 64KB
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
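/* Illustration (a sketch) of why the queue in/out pointer registers can
 * double as BASE_LO above: the 1KB-aligned request queue leaves bits 9:0 of
 * its bus address zero, so the hardware keeps the 5-bit producer index in
 * bits 9:5 of the same register (EDMA_REQ_Q_PTR_SHIFT == 5).  Likewise the
 * 256B-aligned response queue leaves bits 7:0 free, with the consumer index
 * held in bits 7:3 (EDMA_RSP_Q_PTR_SHIFT == 3).
 */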
/* Command ReQuest Block: 32B */

/* Command ResPonse Block: 8B */

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned		req_producer;	/* cp of req_in_ptr */
	unsigned		rsp_consumer;	/* cp of rsp_out_ptr */
	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv;
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static void mv_host_stop(struct ata_host_set *host_set);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);
static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.sht		= &mv_sht,
		.host_flags	= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_5080},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},

	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},

	{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x0241), 0, 0, chip_604x},
	{}			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static int msi;		/* Use PCI msi; either zero (off, default) or non-zero */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
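/* Usage sketch: writelfl() is for writes that must actually reach the chip
 * before the CPU proceeds -- e.g. clearing an IRQ cause register, as in
 * "writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS)" -- at the cost of one read
 * across the bus.  Plain writel() suffices where a posted write is harmless,
 * as for the BASE_HI queue address setup in mv_port_start() below.
 */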
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
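/* Worked example (a sketch) of the address math above: for port 5 on a
 * dual-HC chip, mv_hc_from_port(5) == 1 and mv_hardport_from_port(5) == 1,
 * so mv_port_base() resolves to
 *
 *	base + MV_SATAHC0_REG_BASE	(+ 0x20000)
 *	     + 1 * MV_SATAHC_REG_SZ	(+ 0x10000, second HC)
 *	     + MV_SATAHC_ARBTR_REG_SZ	(+  0x2000, arbiter block)
 *	     + 1 * MV_PORT_REG_SZ	(+  0x2000, hard port 1)
 *	     = base + 0x34000
 */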
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host_set->mmio_base, ap->port_no);
}

static inline int mv_get_hc_count(unsigned long host_flags)
{
	return ((host_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}
/**
 *	mv_start_dma - Enable eDMA engine
 *	@base: port base address
 *	@pp: port private data
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 *	mv_stop_dma - Disable eDMA engine
 *	@ap: ATA channel to manipulate
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.  The disable bit auto clears. */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg)) {
			break;
		}
		udelay(100);
	}

	if (EDMA_EN & reg) {
		printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id);
		/* FIXME: Consider doing a reset here to recover */
	}
}
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (port < 0) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	DPRINTK("PCI config space regs:\n");
	mv_dump_pci_cfg(pdev, 0x68);

	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		return readl(mv_ap_base(ap) + ofs);
	} else {
		return (u32) ofs;
	}
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		writelfl(val, mv_ap_base(ap) + ofs);
	}
}
/**
 *	mv_host_stop - Host specific cleanup/stop routine.
 *	@host_set: host data structure
 *
 *	Disable ints, cleanup host memory, call general purpose
 *	host_stop.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_host_stop(struct ata_host_set *host_set)
{
	struct mv_host_priv *hpriv = host_set->private_data;
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
		pci_disable_msi(pdev);
	} else {
		pci_intx(pdev, 0);
	}
	kfree(hpriv);
	ata_host_stop(host_set);
}

static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
{
	dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
}
static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~0x1f;		/* clear queue depth */
	cfg &= ~EDMA_CFG_NCQ;	/* clear NCQ mode */
	cfg &= ~(1 << 9);	/* disable equeue */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* dis RX PM port mask */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab host q cache */
		cfg |= (1 << 22);	/* enab cutthrough */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/**
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory, init port private memory,
 *	zero indices.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct mv_host_priv *hpriv = ap->host_set->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc;

	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
	memset(pp, 0, sizeof(*pp));

	mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				 GFP_KERNEL);
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(hpriv, port_mmio);
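	/* Note: the "(x >> 16) >> 16" below extracts the high dword of a
	 * dma_addr_t without using ">> 32", which would be undefined when
	 * dma_addr_t is only 32 bits wide.
	 */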
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	pp->req_producer = pp->rsp_consumer = 0;

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;

err_out_priv:
	mv_priv_free(pp, dev);
/**
 *	mv_port_stop - Port specific cleanup/stop routine.
 *	@ap: ATA channel to manipulate
 *
 *	Stop DMA, cleanup port memory.
 *
 *	LOCKING:
 *	This routine uses the host_set lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct mv_port_priv *pp = ap->private_data;
	unsigned long flags;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	ap->private_data = NULL;
	ata_pad_free(ap, dev);
	mv_priv_free(pp, dev);
	kfree(pp);
}
/**
 *	mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *	@qc: queued command whose SG list to source from
 *
 *	Populate the SG list and mark the last entry.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int i = 0;
	struct scatterlist *sg;

	ata_for_each_sg(sg, qc) {
		dma_addr_t addr;
		u32 sg_len, len, offset;

		addr = sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & MV_DMA_BOUNDARY;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
			pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
			pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			if (!sg_len && ata_sg_is_last(sg, qc))
				pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

			i++;
		}
	}
}
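/* Worked example (a sketch) of the splitting in mv_fill_sg(): a 96KB segment
 * at bus address 0x1000 cannot fit in one ePRD (64KB max), so it yields
 *
 *	entry 0: addr 0x1000,  len 0xf000  (up to the 64KB boundary)
 *	entry 1: addr 0x10000, len 0x9000  (the remainder)
 *
 * A full 64KB entry encodes its length as 0 (len & 0xffff), and only the
 * final entry of the final segment gets EPRD_FLAG_END_OF_TBL.
 */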
static inline unsigned mv_inc_q_index(unsigned *index)
{
	*index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
	return *index;
}

static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
{
	*cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
}
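/* Worked example (a sketch): the final command word of a CRQB, as packed by
 * mv_qc_prep() via mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1).
 * For tf->command == 0xca (ATA_CMD_WRITE, with ATA_REG_CMD == 7) this is
 *
 *	0x00ca | (7 << 8) | (0x2 << 11) | (1 << 15) == 0x97ca
 *
 * i.e. data in bits 7:0, shadow register address in bits 10:8, CRQB_CMD_CS,
 * and CRQB_CMD_LAST all packed into one 16-bit word.
 */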
/**
 *	mv_qc_prep - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	u16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* the req producer index should be the same as we remember it */
	WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
		  EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
		pp->req_producer);

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	pp->crqb[pp->req_producer].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[pp->req_producer].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[pp->req_producer].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *	mv_qc_prep_iie - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	u32 flags = 0;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* the req producer index should be the same as we remember it */
	WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
		  EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
		pp->req_producer);

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it sanity checks our local
 *	caches of the request producer/consumer indices then enables
 *	DMA and bumps the request producer index.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;
	u32 in_ptr;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* the req producer index should be the same as we remember it */
	WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
		pp->req_producer);
	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
		((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
		  EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	mv_inc_q_index(&pp->req_producer);	/* now incr producer index */

	mv_start_dma(port_mmio, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
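/* Worked example (a sketch): with the request queue based at bus address
 * 0x1f000000 and req_producer now 3, mv_qc_issue() writes back
 *
 *	(0x1f000000 & EDMA_REQ_Q_BASE_LO_MASK) | (3 << EDMA_REQ_Q_PTR_SHIFT)
 *	  == 0x1f000000 | 0x60
 *
 * so the EDMA sees the new producer index in bits 9:5 while the BASE_LO
 * portion of the register is preserved.
 */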
/**
 *	mv_get_crpb_status - get status from most recently completed cmd
 *	@ap: ATA channel to manipulate
 *
 *	This routine is for use when the port is in DMA mode, when it
 *	will be using the CRPB (command response block) method of
 *	returning command completion information.  We check indices
 *	are good, grab status, and bump the response consumer index to
 *	prove that we're up to date.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 out_ptr;
	u8 ata_status;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* the response consumer index should be the same as we remember it */
	WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
		pp->rsp_consumer);

	ata_status = pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
		  EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
		pp->rsp_consumer);

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return ata_status;
}
/**
 *	mv_err_intr - Handle error interrupts on the port
 *	@ap: ATA channel to manipulate
 *
 *	In most cases, just clear the interrupt and move on.  However,
 *	some cases require an eDMA reset, which is done right before
 *	the COMRESET in mv_phy_reset().  The SERR case requires a
 *	clear of pending errors in the SATA SERROR register.  Finally,
 *	if the port disabled DMA, update our cached copy to match.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		serr = scr_read(ap, SCR_ERROR);
		scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp = ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (EDMA_ERR_FATAL & edma_err_cause) {
		mv_stop_and_reset(ap);
	}
}
/**
 *	mv_host_intr - Handle all interrupts on the given host controller
 *	@host_set: host specific structure
 *	@relevant: port error bits relevant to this host controller
 *	@hc: which host controller we're to look at
 *
 *	Read then write clear the HC interrupt status then walk each
 *	port connected to the HC and see if it needs servicing.  Port
 *	success ints are reported in the HC interrupt status reg, the
 *	port error ints are reported in the higher level main
 *	interrupt status register and thus are passed in via the
 *	'relevant' argument.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
			 unsigned int hc)
{
	void __iomem *mmio = host_set->mmio_base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		struct ata_port *ap = host_set->ports[port];
		struct mv_port_priv *pp = ap->private_data;

		hard_port = port & MV_PORT_MASK;	/* range 0-3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.  We could mask it,
		 * but it's pretty rare and may not be worth the overhead.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA: check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);
				handled = 1;
			}
		} else {
			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				ata_status = readb((void __iomem *)
					   ap->ioaddr.status_addr);
				handled = 1;
			}
		}

		if (ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))
			continue;

		err_mask = ac_err_mask(ata_status);

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
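		/* Worked example (a sketch): for port 5, shift == 2*5 == 10
		 * and is bumped to 11 to skip bit 8; PORT0_ERR << 11 then
		 * picks out the error bit for HC1's hard port 1 in the main
		 * cause register, matching the bits 9-17 layout noted in the
		 * register definitions above.
		 */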
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap);
			err_mask |= AC_ERR_OTHER;
			handled = 1;
		}

		if (handled) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port, ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.ctl & ATA_NIEN)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}
/**
 *	mv_interrupt -
 *	@irq: unused
 *	@dev_instance: private data; in this case the host structure
 *	@regs: unused
 *
 *	Read the read only register to determine if any host
 *	controllers have pending interrupts.  If so, call lower level
 *	routine to handle.  Also check for PCI errors which are only
 *	reported here.
 *
 *	LOCKING:
 *	This routine holds the host_set lock while processing pending
 *	interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host_set->mmio_base;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
	spin_lock(&host_set->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host_set, relevant, hc);
			handled++;
		}
	}
	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host_set->lock);

	return IRQ_RETVAL(handled);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(mmio + ofs);
	else
		return (u32) ofs;
}

static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, mmio + ofs);
}
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;
	u8 rev_id;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *	mv6_reset_hc - Perform the 6xxx global soft reset
 *	@mmio: base address of the HBA
 *
 *	This routine only applies to 6xxx parts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 12) | (1 << 7);
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);
}
static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host_set->private_data;
	void __iomem *mmio = ap->host_set->mmio_base;

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
}
static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (can_sleep)
		msleep(msec);
	else
		mdelay(msec);
}

/**
 *	__mv_phy_reset - Perform eDMA reset followed by COMRESET
 *	@ap: ATA channel to manipulate
 *
 *	Part of this is taken from __sata_phy_reset and modified to
 *	not sleep since this routine gets called from interrupt level.
 *
 *	LOCKING:
 *	Inherited from caller.  This is coded to be safe to call at
 *	interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host_set->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	timeout = jiffies + msecs_to_jiffies(200);
	do {
		sstatus = scr_read(ap, SCR_STATUS) & 0x3;
		if ((sstatus == 3) || (sstatus == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, timeout));

	/* work around errata */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (sata_dev_present(ap)) {
		ata_port_probe(ap);
	} else {
		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
		       ap->id, scr_read(ap, SCR_STATUS));
		ata_port_disable(ap);
		return;
	}
	ap->cbl = ATA_CBL_SATA;

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	do {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
	} while (time_before(jiffies, timeout));

	tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
	tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
	tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
	tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_enabled(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}
static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}
/**
 *	mv_eng_timeout - Routine called by libata when SCSI times out I/O
 *	@ap: ATA channel to manipulate
 *
 *	Intent is to clear all pending error conditions, reset the
 *	chip/bus, fail the command, and move on.
 *
 *	LOCKING:
 *	This routine holds the host_set lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;

	printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n", ap->id);
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
			 to_pci_dev(ap->host_set->dev));

	qc = ata_qc_from_tag(ap, ap->active_tag);
	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
	       ap->host_set->mmio_base, ap, qc, qc->scsicmd,
	       &qc->scsicmd->cmnd);

	mv_err_intr(ap);
	mv_stop_and_reset(ap);

	qc->err_mask |= AC_ERR_TIMEOUT;
	ata_eh_qc_complete(qc);
}
/**
 *	mv_port_init - Perform some early initialization on a single port.
 *	@port: libata data structure storing shadow register addresses
 *	@port_mmio: base address of the port
 *
 *	Initialize shadow register mmio addresses, clear outstanding
 *	interrupts on the port, and unmask interrupts for the future
 *	start of the port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
{
	unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
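	/* Worked example (a sketch): the shadow registers are u32-spaced, so
	 * with ATA_REG_NSECT == 2 the sector count register lands at
	 * shd_base + 8, status (ATA_REG_STATUS == 7) at shd_base + 0x1c, and
	 * control/altstatus directly above it at SHD_CTL_AST_OFS (0x20).
	 */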
	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
		      unsigned int board_idx)
{
	u8 rev_id;
	u32 hp_flags = hpriv->hp_flags;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;

		switch (rev_id) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_6042:
	case chip_7042:
		hpriv->ops = &mv6xxx_ops;

		hp_flags |= MV_HP_GEN_IIE;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
/**
 *	mv_init_host - Perform some early initialization of the host.
 *	@pdev: host PCI device
 *	@probe_ent: early data struct representing the host
 *
 *	If possible, do an early global reset of the host.  Then do
 *	our port init and clear/unmask all/relevant host interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
			unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	void __iomem *mmio = probe_ent->mmio_base;
	struct mv_host_priv *hpriv = probe_ent->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(pdev, hpriv, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(probe_ent->host_flags);
	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;

	for (port = 0; port < probe_ent->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < probe_ent->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 12);
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < probe_ent->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&probe_ent->port[port], port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
	writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
/**
 *	mv_print_info - Dump key info to kernel log for perusal.
 *	@probe_ent: early data struct representing the host
 *
 *	FIXME: complete this.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_print_info(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	struct mv_host_priv *hpriv = probe_ent->private_data;
	u8 rev_id, scc;
	const char *scc_s;
	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to work around
	 */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	dev_printk(KERN_INFO, &pdev->dev,
		   "%u slots %u ports %s mode IRQ via %s\n",
		   (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 *	mv_init_one - handle a positive probe of a Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_probe_ent *probe_ent = NULL;
	struct mv_host_priv *hpriv;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	void __iomem *mmio_base;
	int pci_dev_busy = 0, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	memset(probe_ent, 0, sizeof(*probe_ent));
	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}

	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		rc = -ENOMEM;
		goto err_out_iounmap;
	}
	memset(hpriv, 0, sizeof(*hpriv));

	probe_ent->sht = mv_port_info[board_idx].sht;
	probe_ent->host_flags = mv_port_info[board_idx].host_flags;
	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
	probe_ent->port_ops = mv_port_info[board_idx].port_ops;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->mmio_base = mmio_base;
	probe_ent->private_data = hpriv;

	/* initialize adapter */
	rc = mv_init_host(pdev, probe_ent, board_idx);
	if (rc) {
		goto err_out_hpriv;
	}

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev) == 0) {
		hpriv->hp_flags |= MV_HP_FLAG_MSI;
	} else {
		pci_intx(pdev, 1);
	}

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(probe_ent);

	if (ata_device_add(probe_ent) == 0) {
		rc = -ENODEV;		/* No devices discovered */
		goto err_out_dev_add;
	}

	kfree(probe_ent);
	return 0;

err_out_dev_add:
	if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
		pci_disable_msi(pdev);
	} else {
		pci_intx(pdev, 0);
	}
err_out_hpriv:
	kfree(hpriv);
err_out_iounmap:
	pci_iounmap(pdev, mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy) {
		pci_disable_device(pdev);
	}

	return rc;
}
static int __init mv_init(void)
{
	return pci_module_init(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);