blob: 8c554f2e69b0fe4aac1a7dec4115e07350114439 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Jeff Garzik8b260242005-11-12 12:32:50 -05004 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05005 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04006 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
Jeff Garzik4a05e202007-05-24 23:40:15 -040024/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
32 2) Convert to LibATA new EH. Required for hotplug, NCQ, and sane
33 probing/error handling in general. MUST HAVE.
34
35 3) Add hotplug support (easy, once new-EH support appears)
36
37 4) Add NCQ support (easy to intermediate, once new-EH support appears)
38
39 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
40
41 6) Add port multiplier support (intermediate)
42
43 7) Test and verify 3.0 Gbps support
44
45 8) Develop a low-power-consumption strategy, and implement it.
46
47 9) [Experiment, low priority] See if ATAPI can be supported using
48 "unknown FIS" or "vendor-specific FIS" support, or something creative
49 like that.
50
51 10) [Experiment, low priority] Investigate interrupt coalescing.
52 Quite often, especially with PCI Message Signalled Interrupts (MSI),
53 the overhead reduced by interrupt mitigation is quite often not
54 worth the latency cost.
55
56 11) [Experiment, Marvell value added] Is it possible to use target
57 mode to cross-connect two Linux boxes with Marvell cards? If so,
58 creating LibATA target mode support would be very interesting.
59
60 Target mode, for those without docs, is the ability to directly
61 connect two SATA controllers.
62
63 13) Verify that 7042 is fully supported. I only have a 6042.
64
65*/
66
67
Brett Russ20f733e2005-09-01 18:26:17 -040068#include <linux/kernel.h>
69#include <linux/module.h>
70#include <linux/pci.h>
71#include <linux/init.h>
72#include <linux/blkdev.h>
73#include <linux/delay.h>
74#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040075#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050076#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040077#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050078#include <scsi/scsi_cmnd.h>
Brett Russ20f733e2005-09-01 18:26:17 -040079#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080
81#define DRV_NAME "sata_mv"
Jeff Garzik8bc3fc42007-05-21 20:26:38 -040082#define DRV_VERSION "0.81"
Brett Russ20f733e2005-09-01 18:26:17 -040083
84enum {
85 /* BAR's are enumerated in terms of pci_resource_start() terms */
86 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
87 MV_IO_BAR = 2, /* offset 0x18: IO space */
88 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
89
90 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
91 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
92
93 MV_PCI_REG_BASE = 0,
94 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040095 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
96 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
97 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
98 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
99 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
100
Brett Russ20f733e2005-09-01 18:26:17 -0400101 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -0500102 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500103 MV_GPIO_PORT_CTL = 0x104f0,
104 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400105
106 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
107 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
109 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
110
Brett Russ31961942005-09-30 01:36:00 -0400111 MV_MAX_Q_DEPTH = 32,
112 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
113
114 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
115 * CRPB needs alignment on a 256B boundary. Size == 256B
116 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
118 */
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
121 MV_MAX_SG_CT = 176,
122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
123 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
124
Brett Russ20f733e2005-09-01 18:26:17 -0400125 MV_PORTS_PER_HC = 4,
126 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
127 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400128 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400129 MV_PORT_MASK = 3,
130
131 /* Host Flags */
132 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
133 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400134 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
135 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
136 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500137 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400138
Brett Russ31961942005-09-30 01:36:00 -0400139 CRQB_FLAG_READ = (1 << 0),
140 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400141 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
142 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400143 CRQB_CMD_ADDR_SHIFT = 8,
144 CRQB_CMD_CS = (0x2 << 11),
145 CRQB_CMD_LAST = (1 << 15),
146
147 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400148 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
149 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400150
151 EPRD_FLAG_END_OF_TBL = (1 << 31),
152
Brett Russ20f733e2005-09-01 18:26:17 -0400153 /* PCI interface registers */
154
Brett Russ31961942005-09-30 01:36:00 -0400155 PCI_COMMAND_OFS = 0xc00,
156
Brett Russ20f733e2005-09-01 18:26:17 -0400157 PCI_MAIN_CMD_STS_OFS = 0xd30,
158 STOP_PCI_MASTER = (1 << 2),
159 PCI_MASTER_EMPTY = (1 << 3),
160 GLOB_SFT_RST = (1 << 4),
161
Jeff Garzik522479f2005-11-12 22:14:02 -0500162 MV_PCI_MODE = 0xd00,
163 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
164 MV_PCI_DISC_TIMER = 0xd04,
165 MV_PCI_MSI_TRIGGER = 0xc38,
166 MV_PCI_SERR_MASK = 0xc28,
167 MV_PCI_XBAR_TMOUT = 0x1d04,
168 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
169 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
170 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
171 MV_PCI_ERR_COMMAND = 0x1d50,
172
173 PCI_IRQ_CAUSE_OFS = 0x1d58,
174 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400175 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
176
177 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
178 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
179 PORT0_ERR = (1 << 0), /* shift by port # */
180 PORT0_DONE = (1 << 1), /* shift by port # */
181 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
182 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
183 PCI_ERR = (1 << 18),
184 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
185 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500186 PORTS_0_3_COAL_DONE = (1 << 8),
187 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400188 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
189 GPIO_INT = (1 << 22),
190 SELF_INT = (1 << 23),
191 TWSI_INT = (1 << 24),
192 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500193 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500194 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400195 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
196 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500197 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
198 HC_MAIN_RSVD_5),
Brett Russ20f733e2005-09-01 18:26:17 -0400199
200 /* SATAHC registers */
201 HC_CFG_OFS = 0,
202
203 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400204 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400205 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
206 DEV_IRQ = (1 << 8), /* shift by port # */
207
208 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400209 SHD_BLK_OFS = 0x100,
210 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400211
212 /* SATA registers */
213 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
214 SATA_ACTIVE_OFS = 0x350,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500215 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500216 PHY_MODE4 = 0x314,
217 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500218 MV5_PHY_MODE = 0x74,
219 MV5_LT_MODE = 0x30,
220 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500221 SATA_INTERFACE_CTL = 0x050,
222
223 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400224
225 /* Port registers */
226 EDMA_CFG_OFS = 0,
Brett Russ31961942005-09-30 01:36:00 -0400227 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
228 EDMA_CFG_NCQ = (1 << 5),
229 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
230 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
231 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400232
233 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
234 EDMA_ERR_IRQ_MASK_OFS = 0xc,
235 EDMA_ERR_D_PAR = (1 << 0),
236 EDMA_ERR_PRD_PAR = (1 << 1),
237 EDMA_ERR_DEV = (1 << 2),
238 EDMA_ERR_DEV_DCON = (1 << 3),
239 EDMA_ERR_DEV_CON = (1 << 4),
240 EDMA_ERR_SERR = (1 << 5),
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400241 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
242 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Brett Russ20f733e2005-09-01 18:26:17 -0400243 EDMA_ERR_BIST_ASYNC = (1 << 8),
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400244 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Brett Russ20f733e2005-09-01 18:26:17 -0400245 EDMA_ERR_CRBQ_PAR = (1 << 9),
246 EDMA_ERR_CRPB_PAR = (1 << 10),
247 EDMA_ERR_INTRL_PAR = (1 << 11),
248 EDMA_ERR_IORDY = (1 << 12),
249 EDMA_ERR_LNK_CTRL_RX = (0xf << 13),
250 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
251 EDMA_ERR_LNK_DATA_RX = (0xf << 17),
252 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),
253 EDMA_ERR_LNK_DATA_TX = (0x1f << 26),
254 EDMA_ERR_TRANS_PROTO = (1 << 31),
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400255 EDMA_ERR_OVERRUN_5 = (1 << 5),
256 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Jeff Garzik8b260242005-11-12 12:32:50 -0500257 EDMA_ERR_FATAL = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Brett Russ20f733e2005-09-01 18:26:17 -0400258 EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
259 EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
Jeff Garzik8b260242005-11-12 12:32:50 -0500260 EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
Brett Russ20f733e2005-09-01 18:26:17 -0400261 EDMA_ERR_LNK_DATA_RX |
Jeff Garzik8b260242005-11-12 12:32:50 -0500262 EDMA_ERR_LNK_DATA_TX |
Brett Russ20f733e2005-09-01 18:26:17 -0400263 EDMA_ERR_TRANS_PROTO),
264
Brett Russ31961942005-09-30 01:36:00 -0400265 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
266 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400267
268 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
269 EDMA_REQ_Q_PTR_SHIFT = 5,
270
271 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
272 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
273 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400274 EDMA_RSP_Q_PTR_SHIFT = 3,
275
Brett Russ20f733e2005-09-01 18:26:17 -0400276 EDMA_CMD_OFS = 0x28,
277 EDMA_EN = (1 << 0),
278 EDMA_DS = (1 << 1),
279 ATA_RST = (1 << 2),
280
Jeff Garzikc9d39132005-11-13 17:47:51 -0500281 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500282 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500283
Brett Russ31961942005-09-30 01:36:00 -0400284 /* Host private flags (hp_flags) */
285 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500286 MV_HP_ERRATA_50XXB0 = (1 << 1),
287 MV_HP_ERRATA_50XXB2 = (1 << 2),
288 MV_HP_ERRATA_60X1B2 = (1 << 3),
289 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500290 MV_HP_ERRATA_XX42A0 = (1 << 5),
291 MV_HP_50XX = (1 << 6),
292 MV_HP_GEN_IIE = (1 << 7),
Brett Russ20f733e2005-09-01 18:26:17 -0400293
Brett Russ31961942005-09-30 01:36:00 -0400294 /* Port private flags (pp_flags) */
295 MV_PP_FLAG_EDMA_EN = (1 << 0),
296 MV_PP_FLAG_EDMA_DS_ACT = (1 << 1),
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400297 MV_PP_FLAG_HAD_A_RESET = (1 << 2),
Brett Russ31961942005-09-30 01:36:00 -0400298};
299
Jeff Garzikc9d39132005-11-13 17:47:51 -0500300#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500301#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500302#define IS_GEN_I(hpriv) IS_50XX(hpriv)
303#define IS_GEN_II(hpriv) IS_60XX(hpriv)
304#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500305
Jeff Garzik095fec82005-11-12 09:50:49 -0500306enum {
Jeff Garzikd88184f2007-02-26 01:26:06 -0500307 MV_DMA_BOUNDARY = 0xffffffffU,
Jeff Garzik095fec82005-11-12 09:50:49 -0500308
309 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
310
311 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
312};
313
Jeff Garzik522479f2005-11-12 22:14:02 -0500314enum chip_type {
315 chip_504x,
316 chip_508x,
317 chip_5080,
318 chip_604x,
319 chip_608x,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500320 chip_6042,
321 chip_7042,
Jeff Garzik522479f2005-11-12 22:14:02 -0500322};
323
Brett Russ31961942005-09-30 01:36:00 -0400324/* Command ReQuest Block: 32B */
325struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400326 __le32 sg_addr;
327 __le32 sg_addr_hi;
328 __le16 ctrl_flags;
329 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400330};
331
Jeff Garzike4e7b892006-01-31 12:18:41 -0500332struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400333 __le32 addr;
334 __le32 addr_hi;
335 __le32 flags;
336 __le32 len;
337 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500338};
339
Brett Russ31961942005-09-30 01:36:00 -0400340/* Command ResPonse Block: 8B */
341struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400342 __le16 id;
343 __le16 flags;
344 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400345};
346
347/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
348struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400349 __le32 addr;
350 __le32 flags_size;
351 __le32 addr_hi;
352 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400353};
354
355struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400356 struct mv_crqb *crqb;
357 dma_addr_t crqb_dma;
358 struct mv_crpb *crpb;
359 dma_addr_t crpb_dma;
360 struct mv_sg *sg_tbl;
361 dma_addr_t sg_tbl_dma;
Brett Russ31961942005-09-30 01:36:00 -0400362 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400363};
364
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500365struct mv_port_signal {
366 u32 amps;
367 u32 pre;
368};
369
Jeff Garzik47c2b672005-11-12 21:13:17 -0500370struct mv_host_priv;
371struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500372 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
373 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500374 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
375 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
376 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500377 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
378 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500379 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
380 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500381};
382
Brett Russ20f733e2005-09-01 18:26:17 -0400383struct mv_host_priv {
Brett Russ31961942005-09-30 01:36:00 -0400384 u32 hp_flags;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500385 struct mv_port_signal signal[8];
Jeff Garzik47c2b672005-11-12 21:13:17 -0500386 const struct mv_hw_ops *ops;
Brett Russ20f733e2005-09-01 18:26:17 -0400387};
388
389static void mv_irq_clear(struct ata_port *ap);
390static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
391static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500392static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
393static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ20f733e2005-09-01 18:26:17 -0400394static void mv_phy_reset(struct ata_port *ap);
Jeff Garzik22374672005-11-17 10:59:48 -0500395static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
Brett Russ31961942005-09-30 01:36:00 -0400396static int mv_port_start(struct ata_port *ap);
397static void mv_port_stop(struct ata_port *ap);
398static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500399static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900400static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Brett Russ31961942005-09-30 01:36:00 -0400401static void mv_eng_timeout(struct ata_port *ap);
Brett Russ20f733e2005-09-01 18:26:17 -0400402static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
403
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500404static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
405 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500406static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
407static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
408 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500409static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
410 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500411static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
412static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500413
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500414static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
415 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500416static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
417static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
418 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500419static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
420 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500421static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
422static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500423static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
424 unsigned int port_no);
425static void mv_stop_and_reset(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500426
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400427static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400428 .module = THIS_MODULE,
429 .name = DRV_NAME,
430 .ioctl = ata_scsi_ioctl,
431 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400432 .can_queue = ATA_DEF_QUEUE,
433 .this_id = ATA_SHT_THIS_ID,
434 .sg_tablesize = MV_MAX_SG_CT,
435 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
436 .emulated = ATA_SHT_EMULATED,
437 .use_clustering = 1,
438 .proc_name = DRV_NAME,
439 .dma_boundary = MV_DMA_BOUNDARY,
440 .slave_configure = ata_scsi_slave_config,
441 .slave_destroy = ata_scsi_slave_destroy,
442 .bios_param = ata_std_bios_param,
443};
444
445static struct scsi_host_template mv6_sht = {
446 .module = THIS_MODULE,
447 .name = DRV_NAME,
448 .ioctl = ata_scsi_ioctl,
449 .queuecommand = ata_scsi_queuecmd,
450 .can_queue = ATA_DEF_QUEUE,
Brett Russ20f733e2005-09-01 18:26:17 -0400451 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500452 .sg_tablesize = MV_MAX_SG_CT,
Brett Russ20f733e2005-09-01 18:26:17 -0400453 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
454 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500455 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400456 .proc_name = DRV_NAME,
457 .dma_boundary = MV_DMA_BOUNDARY,
458 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900459 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400460 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400461};
462
Jeff Garzikc9d39132005-11-13 17:47:51 -0500463static const struct ata_port_operations mv5_ops = {
464 .port_disable = ata_port_disable,
465
466 .tf_load = ata_tf_load,
467 .tf_read = ata_tf_read,
468 .check_status = ata_check_status,
469 .exec_command = ata_exec_command,
470 .dev_select = ata_std_dev_select,
471
472 .phy_reset = mv_phy_reset,
Jeff Garzikcffacd82007-03-09 09:46:47 -0500473 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500474
475 .qc_prep = mv_qc_prep,
476 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900477 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500478
479 .eng_timeout = mv_eng_timeout,
480
Jeff Garzikc9d39132005-11-13 17:47:51 -0500481 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900482 .irq_on = ata_irq_on,
483 .irq_ack = ata_irq_ack,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500484
485 .scr_read = mv5_scr_read,
486 .scr_write = mv5_scr_write,
487
488 .port_start = mv_port_start,
489 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500490};
491
492static const struct ata_port_operations mv6_ops = {
Brett Russ20f733e2005-09-01 18:26:17 -0400493 .port_disable = ata_port_disable,
494
495 .tf_load = ata_tf_load,
496 .tf_read = ata_tf_read,
497 .check_status = ata_check_status,
498 .exec_command = ata_exec_command,
499 .dev_select = ata_std_dev_select,
500
501 .phy_reset = mv_phy_reset,
Jeff Garzikcffacd82007-03-09 09:46:47 -0500502 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400503
Brett Russ31961942005-09-30 01:36:00 -0400504 .qc_prep = mv_qc_prep,
505 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900506 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400507
Brett Russ31961942005-09-30 01:36:00 -0400508 .eng_timeout = mv_eng_timeout,
Brett Russ20f733e2005-09-01 18:26:17 -0400509
Brett Russ20f733e2005-09-01 18:26:17 -0400510 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900511 .irq_on = ata_irq_on,
512 .irq_ack = ata_irq_ack,
Brett Russ20f733e2005-09-01 18:26:17 -0400513
514 .scr_read = mv_scr_read,
515 .scr_write = mv_scr_write,
516
Brett Russ31961942005-09-30 01:36:00 -0400517 .port_start = mv_port_start,
518 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400519};
520
Jeff Garzike4e7b892006-01-31 12:18:41 -0500521static const struct ata_port_operations mv_iie_ops = {
522 .port_disable = ata_port_disable,
523
524 .tf_load = ata_tf_load,
525 .tf_read = ata_tf_read,
526 .check_status = ata_check_status,
527 .exec_command = ata_exec_command,
528 .dev_select = ata_std_dev_select,
529
530 .phy_reset = mv_phy_reset,
Jeff Garzikcffacd82007-03-09 09:46:47 -0500531 .cable_detect = ata_cable_sata,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500532
533 .qc_prep = mv_qc_prep_iie,
534 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900535 .data_xfer = ata_data_xfer,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500536
537 .eng_timeout = mv_eng_timeout,
538
Jeff Garzike4e7b892006-01-31 12:18:41 -0500539 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900540 .irq_on = ata_irq_on,
541 .irq_ack = ata_irq_ack,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500542
543 .scr_read = mv_scr_read,
544 .scr_write = mv_scr_write,
545
546 .port_start = mv_port_start,
547 .port_stop = mv_port_stop,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500548};
549
Arjan van de Ven98ac62d2005-11-28 10:06:23 +0100550static const struct ata_port_info mv_port_info[] = {
Brett Russ20f733e2005-09-01 18:26:17 -0400551 { /* chip_504x */
Jeff Garzikcca39742006-08-24 03:19:22 -0400552 .flags = MV_COMMON_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400553 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400554 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500555 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400556 },
557 { /* chip_508x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400558 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400559 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400560 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500561 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400562 },
Jeff Garzik47c2b672005-11-12 21:13:17 -0500563 { /* chip_5080 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400564 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500565 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400566 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500567 .port_ops = &mv5_ops,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500568 },
Brett Russ20f733e2005-09-01 18:26:17 -0400569 { /* chip_604x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400570 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400571 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400572 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500573 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400574 },
575 { /* chip_608x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400576 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
577 MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400578 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400579 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500580 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400581 },
Jeff Garzike4e7b892006-01-31 12:18:41 -0500582 { /* chip_6042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400583 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500584 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400585 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500586 .port_ops = &mv_iie_ops,
587 },
588 { /* chip_7042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400589 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500590 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400591 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500592 .port_ops = &mv_iie_ops,
593 },
Brett Russ20f733e2005-09-01 18:26:17 -0400594};
595
Jeff Garzik3b7d6972005-11-10 11:04:11 -0500596static const struct pci_device_id mv_pci_tbl[] = {
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400597 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
598 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
599 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
600 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
Brett Russ20f733e2005-09-01 18:26:17 -0400601
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400602 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
603 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
604 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
605 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
606 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
Jeff Garzik29179532005-11-11 08:08:03 -0500607
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400608 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
609
Florian Attenbergerd9f9c6b2007-07-02 17:09:29 +0200610 /* Adaptec 1430SA */
611 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
612
Olof Johanssone93f09d2007-01-18 18:39:59 -0600613 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
614
Morrison, Tom6a3d5862007-03-06 02:38:10 -0800615 /* add Marvell 7042 support */
616 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
617
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400618 { } /* terminate list */
Brett Russ20f733e2005-09-01 18:26:17 -0400619};
620
621static struct pci_driver mv_pci_driver = {
622 .name = DRV_NAME,
623 .id_table = mv_pci_tbl,
624 .probe = mv_init_one,
625 .remove = ata_pci_remove_one,
626};
627
Jeff Garzik47c2b672005-11-12 21:13:17 -0500628static const struct mv_hw_ops mv5xxx_ops = {
629 .phy_errata = mv5_phy_errata,
630 .enable_leds = mv5_enable_leds,
631 .read_preamp = mv5_read_preamp,
632 .reset_hc = mv5_reset_hc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500633 .reset_flash = mv5_reset_flash,
634 .reset_bus = mv5_reset_bus,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500635};
636
637static const struct mv_hw_ops mv6xxx_ops = {
638 .phy_errata = mv6_phy_errata,
639 .enable_leds = mv6_enable_leds,
640 .read_preamp = mv6_read_preamp,
641 .reset_hc = mv6_reset_hc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500642 .reset_flash = mv6_reset_flash,
643 .reset_bus = mv_reset_pci_bus,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500644};
645
Brett Russ20f733e2005-09-01 18:26:17 -0400646/*
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500647 * module options
648 */
649static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
650
651
Jeff Garzikd88184f2007-02-26 01:26:06 -0500652/* move to PCI layer or libata core? */
653static int pci_go_64(struct pci_dev *pdev)
654{
655 int rc;
656
657 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
658 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
659 if (rc) {
660 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
661 if (rc) {
662 dev_printk(KERN_ERR, &pdev->dev,
663 "64-bit DMA enable failed\n");
664 return rc;
665 }
666 }
667 } else {
668 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
669 if (rc) {
670 dev_printk(KERN_ERR, &pdev->dev,
671 "32-bit DMA enable failed\n");
672 return rc;
673 }
674 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
675 if (rc) {
676 dev_printk(KERN_ERR, &pdev->dev,
677 "32-bit consistent DMA enable failed\n");
678 return rc;
679 }
680 }
681
682 return rc;
683}
684
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500685/*
Brett Russ20f733e2005-09-01 18:26:17 -0400686 * Functions
687 */
688
689static inline void writelfl(unsigned long data, void __iomem *addr)
690{
691 writel(data, addr);
692 (void) readl(addr); /* flush to avoid PCI posted write */
693}
694
Brett Russ20f733e2005-09-01 18:26:17 -0400695static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
696{
697 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
698}
699
Jeff Garzikc9d39132005-11-13 17:47:51 -0500700static inline unsigned int mv_hc_from_port(unsigned int port)
701{
702 return port >> MV_PORT_HC_SHIFT;
703}
704
705static inline unsigned int mv_hardport_from_port(unsigned int port)
706{
707 return port & MV_PORT_MASK;
708}
709
710static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
711 unsigned int port)
712{
713 return mv_hc_base(base, mv_hc_from_port(port));
714}
715
Brett Russ20f733e2005-09-01 18:26:17 -0400716static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
717{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500718 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500719 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500720 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400721}
722
723static inline void __iomem *mv_ap_base(struct ata_port *ap)
724{
Tejun Heo0d5ff562007-02-01 15:06:36 +0900725 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400726}
727
Jeff Garzikcca39742006-08-24 03:19:22 -0400728static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400729{
Jeff Garzikcca39742006-08-24 03:19:22 -0400730 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400731}
732
/* libata ->irq_clear hook: intentionally a no-op.  This driver appears to
 * clear its interrupt-cause registers in the interrupt path itself (see
 * the write-to-clear of HC_IRQ_CAUSE_OFS in mv_host_intr).
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
736
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400737static void mv_set_edma_ptrs(void __iomem *port_mmio,
738 struct mv_host_priv *hpriv,
739 struct mv_port_priv *pp)
740{
741 /*
742 * initialize request queue
743 */
744 WARN_ON(pp->crqb_dma & 0x3ff);
745 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
746 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
747 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
748
749 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
750 writelfl(pp->crqb_dma & 0xffffffff,
751 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
752 else
753 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
754
755 /*
756 * initialize response queue
757 */
758 WARN_ON(pp->crpb_dma & 0xff);
759 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
760
761 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
762 writelfl(pp->crpb_dma & 0xffffffff,
763 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
764 else
765 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
766
767 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
768 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
769
770}
771
Brett Russ05b308e2005-10-05 17:08:53 -0400772/**
773 * mv_start_dma - Enable eDMA engine
774 * @base: port base address
775 * @pp: port private data
776 *
Tejun Heobeec7db2006-02-11 19:11:13 +0900777 * Verify the local cache of the eDMA state is accurate with a
778 * WARN_ON.
Brett Russ05b308e2005-10-05 17:08:53 -0400779 *
780 * LOCKING:
781 * Inherited from caller.
782 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400783static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
784 struct mv_port_priv *pp)
Brett Russ31961942005-09-30 01:36:00 -0400785{
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400786 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
Brett Russafb0edd2005-10-05 17:08:42 -0400787 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
788 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
789 }
Tejun Heobeec7db2006-02-11 19:11:13 +0900790 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
Brett Russ31961942005-09-30 01:36:00 -0400791}
792
Brett Russ05b308e2005-10-05 17:08:53 -0400793/**
794 * mv_stop_dma - Disable eDMA engine
795 * @ap: ATA channel to manipulate
796 *
Tejun Heobeec7db2006-02-11 19:11:13 +0900797 * Verify the local cache of the eDMA state is accurate with a
798 * WARN_ON.
Brett Russ05b308e2005-10-05 17:08:53 -0400799 *
800 * LOCKING:
801 * Inherited from caller.
802 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400803static int mv_stop_dma(struct ata_port *ap)
Brett Russ31961942005-09-30 01:36:00 -0400804{
805 void __iomem *port_mmio = mv_ap_base(ap);
806 struct mv_port_priv *pp = ap->private_data;
Brett Russ31961942005-09-30 01:36:00 -0400807 u32 reg;
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400808 int i, err = 0;
Brett Russ31961942005-09-30 01:36:00 -0400809
Brett Russafb0edd2005-10-05 17:08:42 -0400810 if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
811 /* Disable EDMA if active. The disable bit auto clears.
Brett Russ31961942005-09-30 01:36:00 -0400812 */
Brett Russ31961942005-09-30 01:36:00 -0400813 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
814 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Brett Russafb0edd2005-10-05 17:08:42 -0400815 } else {
Tejun Heobeec7db2006-02-11 19:11:13 +0900816 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
Brett Russafb0edd2005-10-05 17:08:42 -0400817 }
Jeff Garzik8b260242005-11-12 12:32:50 -0500818
Brett Russ31961942005-09-30 01:36:00 -0400819 /* now properly wait for the eDMA to stop */
820 for (i = 1000; i > 0; i--) {
821 reg = readl(port_mmio + EDMA_CMD_OFS);
822 if (!(EDMA_EN & reg)) {
823 break;
824 }
825 udelay(100);
826 }
827
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400828 if (reg & EDMA_EN) {
Tejun Heof15a1da2006-05-15 20:57:56 +0900829 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
Brett Russafb0edd2005-10-05 17:08:42 -0400830 /* FIXME: Consider doing a reset here to recover */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400831 err = -EIO;
Brett Russ31961942005-09-30 01:36:00 -0400832 }
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400833
834 return err;
Brett Russ31961942005-09-30 01:36:00 -0400835}
836
Jeff Garzik8a70f8d2005-10-05 17:19:47 -0400837#ifdef ATA_DEBUG
Brett Russ31961942005-09-30 01:36:00 -0400838static void mv_dump_mem(void __iomem *start, unsigned bytes)
839{
Brett Russ31961942005-09-30 01:36:00 -0400840 int b, w;
841 for (b = 0; b < bytes; ) {
842 DPRINTK("%p: ", start + b);
843 for (w = 0; b < bytes && w < 4; w++) {
844 printk("%08x ",readl(start + b));
845 b += sizeof(u32);
846 }
847 printk("\n");
848 }
Brett Russ31961942005-09-30 01:36:00 -0400849}
Jeff Garzik8a70f8d2005-10-05 17:19:47 -0400850#endif
851
Brett Russ31961942005-09-30 01:36:00 -0400852static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
853{
854#ifdef ATA_DEBUG
855 int b, w;
856 u32 dw;
857 for (b = 0; b < bytes; ) {
858 DPRINTK("%02x: ", b);
859 for (w = 0; b < bytes && w < 4; w++) {
860 (void) pci_read_config_dword(pdev,b,&dw);
861 printk("%08x ",dw);
862 b += sizeof(u32);
863 }
864 printk("\n");
865 }
866#endif
867}
868static void mv_dump_all_regs(void __iomem *mmio_base, int port,
869 struct pci_dev *pdev)
870{
871#ifdef ATA_DEBUG
Jeff Garzik8b260242005-11-12 12:32:50 -0500872 void __iomem *hc_base = mv_hc_base(mmio_base,
Brett Russ31961942005-09-30 01:36:00 -0400873 port >> MV_PORT_HC_SHIFT);
874 void __iomem *port_base;
875 int start_port, num_ports, p, start_hc, num_hcs, hc;
876
877 if (0 > port) {
878 start_hc = start_port = 0;
879 num_ports = 8; /* shld be benign for 4 port devs */
880 num_hcs = 2;
881 } else {
882 start_hc = port >> MV_PORT_HC_SHIFT;
883 start_port = port;
884 num_ports = num_hcs = 1;
885 }
Jeff Garzik8b260242005-11-12 12:32:50 -0500886 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
Brett Russ31961942005-09-30 01:36:00 -0400887 num_ports > 1 ? num_ports - 1 : start_port);
888
889 if (NULL != pdev) {
890 DPRINTK("PCI config space regs:\n");
891 mv_dump_pci_cfg(pdev, 0x68);
892 }
893 DPRINTK("PCI regs:\n");
894 mv_dump_mem(mmio_base+0xc00, 0x3c);
895 mv_dump_mem(mmio_base+0xd00, 0x34);
896 mv_dump_mem(mmio_base+0xf00, 0x4);
897 mv_dump_mem(mmio_base+0x1d00, 0x6c);
898 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
Dan Alonid220c372006-04-10 23:20:22 -0700899 hc_base = mv_hc_base(mmio_base, hc);
Brett Russ31961942005-09-30 01:36:00 -0400900 DPRINTK("HC regs (HC %i):\n", hc);
901 mv_dump_mem(hc_base, 0x1c);
902 }
903 for (p = start_port; p < start_port + num_ports; p++) {
904 port_base = mv_port_base(mmio_base, p);
905 DPRINTK("EDMA regs (port %i):\n",p);
906 mv_dump_mem(port_base, 0x54);
907 DPRINTK("SATA regs (port %i):\n",p);
908 mv_dump_mem(port_base+0x300, 0x60);
909 }
910#endif
911}
912
Brett Russ20f733e2005-09-01 18:26:17 -0400913static unsigned int mv_scr_offset(unsigned int sc_reg_in)
914{
915 unsigned int ofs;
916
917 switch (sc_reg_in) {
918 case SCR_STATUS:
919 case SCR_CONTROL:
920 case SCR_ERROR:
921 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
922 break;
923 case SCR_ACTIVE:
924 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
925 break;
926 default:
927 ofs = 0xffffffffU;
928 break;
929 }
930 return ofs;
931}
932
933static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
934{
935 unsigned int ofs = mv_scr_offset(sc_reg_in);
936
Jeff Garzik35177262007-02-24 21:26:42 -0500937 if (0xffffffffU != ofs)
Brett Russ20f733e2005-09-01 18:26:17 -0400938 return readl(mv_ap_base(ap) + ofs);
Jeff Garzik35177262007-02-24 21:26:42 -0500939 else
Brett Russ20f733e2005-09-01 18:26:17 -0400940 return (u32) ofs;
Brett Russ20f733e2005-09-01 18:26:17 -0400941}
942
943static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
944{
945 unsigned int ofs = mv_scr_offset(sc_reg_in);
946
Jeff Garzik35177262007-02-24 21:26:42 -0500947 if (0xffffffffU != ofs)
Brett Russ20f733e2005-09-01 18:26:17 -0400948 writelfl(val, mv_ap_base(ap) + ofs);
Brett Russ20f733e2005-09-01 18:26:17 -0400949}
950
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400951static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
952 void __iomem *port_mmio)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500953{
954 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
955
956 /* set up non-NCQ EDMA configuration */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400957 cfg &= ~(1 << 9); /* disable eQue */
Jeff Garzike4e7b892006-01-31 12:18:41 -0500958
Jeff Garzike728eab2007-02-25 02:53:41 -0500959 if (IS_GEN_I(hpriv)) {
960 cfg &= ~0x1f; /* clear queue depth */
Jeff Garzike4e7b892006-01-31 12:18:41 -0500961 cfg |= (1 << 8); /* enab config burst size mask */
Jeff Garzike728eab2007-02-25 02:53:41 -0500962 }
Jeff Garzike4e7b892006-01-31 12:18:41 -0500963
Jeff Garzike728eab2007-02-25 02:53:41 -0500964 else if (IS_GEN_II(hpriv)) {
965 cfg &= ~0x1f; /* clear queue depth */
Jeff Garzike4e7b892006-01-31 12:18:41 -0500966 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
Jeff Garzike728eab2007-02-25 02:53:41 -0500967 cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
968 }
Jeff Garzike4e7b892006-01-31 12:18:41 -0500969
970 else if (IS_GEN_IIE(hpriv)) {
Jeff Garzike728eab2007-02-25 02:53:41 -0500971 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
972 cfg |= (1 << 22); /* enab 4-entry host queue cache */
Jeff Garzike4e7b892006-01-31 12:18:41 -0500973 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
974 cfg |= (1 << 18); /* enab early completion */
Jeff Garzike728eab2007-02-25 02:53:41 -0500975 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
976 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
977 cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
Jeff Garzike4e7b892006-01-31 12:18:41 -0500978 }
979
980 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
981}
982
Brett Russ05b308e2005-10-05 17:08:53 -0400983/**
984 * mv_port_start - Port specific init/start routine.
985 * @ap: ATA channel to manipulate
986 *
987 * Allocate and point to DMA memory, init port private memory,
988 * zero indices.
989 *
990 * LOCKING:
991 * Inherited from caller.
992 */
Brett Russ31961942005-09-30 01:36:00 -0400993static int mv_port_start(struct ata_port *ap)
994{
Jeff Garzikcca39742006-08-24 03:19:22 -0400995 struct device *dev = ap->host->dev;
996 struct mv_host_priv *hpriv = ap->host->private_data;
Brett Russ31961942005-09-30 01:36:00 -0400997 struct mv_port_priv *pp;
998 void __iomem *port_mmio = mv_ap_base(ap);
999 void *mem;
1000 dma_addr_t mem_dma;
Tejun Heo24dc5f32007-01-20 16:00:28 +09001001 int rc;
Brett Russ31961942005-09-30 01:36:00 -04001002
Tejun Heo24dc5f32007-01-20 16:00:28 +09001003 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
Jeff Garzik6037d6b2005-11-04 22:08:00 -05001004 if (!pp)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001005 return -ENOMEM;
Brett Russ31961942005-09-30 01:36:00 -04001006
Tejun Heo24dc5f32007-01-20 16:00:28 +09001007 mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
1008 GFP_KERNEL);
Jeff Garzik6037d6b2005-11-04 22:08:00 -05001009 if (!mem)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001010 return -ENOMEM;
Brett Russ31961942005-09-30 01:36:00 -04001011 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
1012
Jeff Garzik6037d6b2005-11-04 22:08:00 -05001013 rc = ata_pad_alloc(ap, dev);
1014 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001015 return rc;
Jeff Garzik6037d6b2005-11-04 22:08:00 -05001016
Jeff Garzik8b260242005-11-12 12:32:50 -05001017 /* First item in chunk of DMA memory:
Brett Russ31961942005-09-30 01:36:00 -04001018 * 32-slot command request table (CRQB), 32 bytes each in size
1019 */
1020 pp->crqb = mem;
1021 pp->crqb_dma = mem_dma;
1022 mem += MV_CRQB_Q_SZ;
1023 mem_dma += MV_CRQB_Q_SZ;
1024
Jeff Garzik8b260242005-11-12 12:32:50 -05001025 /* Second item:
Brett Russ31961942005-09-30 01:36:00 -04001026 * 32-slot command response table (CRPB), 8 bytes each in size
1027 */
1028 pp->crpb = mem;
1029 pp->crpb_dma = mem_dma;
1030 mem += MV_CRPB_Q_SZ;
1031 mem_dma += MV_CRPB_Q_SZ;
1032
1033 /* Third item:
1034 * Table of scatter-gather descriptors (ePRD), 16 bytes each
1035 */
1036 pp->sg_tbl = mem;
1037 pp->sg_tbl_dma = mem_dma;
1038
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001039 mv_edma_cfg(ap, hpriv, port_mmio);
Brett Russ31961942005-09-30 01:36:00 -04001040
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001041 mv_set_edma_ptrs(port_mmio, hpriv, pp);
Brett Russ31961942005-09-30 01:36:00 -04001042
Brett Russ31961942005-09-30 01:36:00 -04001043 /* Don't turn on EDMA here...do it before DMA commands only. Else
1044 * we'll be unable to send non-data, PIO, etc due to restricted access
1045 * to shadow regs.
1046 */
1047 ap->private_data = pp;
1048 return 0;
1049}
1050
Brett Russ05b308e2005-10-05 17:08:53 -04001051/**
1052 * mv_port_stop - Port specific cleanup/stop routine.
1053 * @ap: ATA channel to manipulate
1054 *
1055 * Stop DMA, cleanup port memory.
1056 *
1057 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001058 * This routine uses the host lock to protect the DMA stop.
Brett Russ05b308e2005-10-05 17:08:53 -04001059 */
Brett Russ31961942005-09-30 01:36:00 -04001060static void mv_port_stop(struct ata_port *ap)
1061{
Brett Russafb0edd2005-10-05 17:08:42 -04001062 unsigned long flags;
Brett Russ31961942005-09-30 01:36:00 -04001063
Jeff Garzikcca39742006-08-24 03:19:22 -04001064 spin_lock_irqsave(&ap->host->lock, flags);
Brett Russ31961942005-09-30 01:36:00 -04001065 mv_stop_dma(ap);
Jeff Garzikcca39742006-08-24 03:19:22 -04001066 spin_unlock_irqrestore(&ap->host->lock, flags);
Brett Russ31961942005-09-30 01:36:00 -04001067}
1068
Brett Russ05b308e2005-10-05 17:08:53 -04001069/**
1070 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1071 * @qc: queued command whose SG list to source from
1072 *
1073 * Populate the SG list and mark the last entry.
1074 *
1075 * LOCKING:
1076 * Inherited from caller.
1077 */
Jeff Garzikd88184f2007-02-26 01:26:06 -05001078static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001079{
1080 struct mv_port_priv *pp = qc->ap->private_data;
Jeff Garzikd88184f2007-02-26 01:26:06 -05001081 unsigned int n_sg = 0;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001082 struct scatterlist *sg;
Jeff Garzikd88184f2007-02-26 01:26:06 -05001083 struct mv_sg *mv_sg;
Brett Russ31961942005-09-30 01:36:00 -04001084
Jeff Garzikd88184f2007-02-26 01:26:06 -05001085 mv_sg = pp->sg_tbl;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001086 ata_for_each_sg(sg, qc) {
Jeff Garzikd88184f2007-02-26 01:26:06 -05001087 dma_addr_t addr = sg_dma_address(sg);
1088 u32 sg_len = sg_dma_len(sg);
Brett Russ31961942005-09-30 01:36:00 -04001089
Jeff Garzikd88184f2007-02-26 01:26:06 -05001090 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1091 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1092 mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
Brett Russ31961942005-09-30 01:36:00 -04001093
Jeff Garzikd88184f2007-02-26 01:26:06 -05001094 if (ata_sg_is_last(sg, qc))
1095 mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
Jeff Garzik972c26b2005-10-18 22:14:54 -04001096
Jeff Garzikd88184f2007-02-26 01:26:06 -05001097 mv_sg++;
1098 n_sg++;
Brett Russ31961942005-09-30 01:36:00 -04001099 }
Jeff Garzikd88184f2007-02-26 01:26:06 -05001100
1101 return n_sg;
Brett Russ31961942005-09-30 01:36:00 -04001102}
1103
Mark Lorda6432432006-05-19 16:36:36 -04001104static inline unsigned mv_inc_q_index(unsigned index)
Brett Russ31961942005-09-30 01:36:00 -04001105{
Mark Lorda6432432006-05-19 16:36:36 -04001106 return (index + 1) & MV_MAX_Q_DEPTH_MASK;
Brett Russ31961942005-09-30 01:36:00 -04001107}
1108
Mark Lorde1469872006-05-22 19:02:03 -04001109static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001110{
Mark Lord559eeda2006-05-19 16:40:15 -04001111 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001112 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001113 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001114}
1115
Brett Russ05b308e2005-10-05 17:08:53 -04001116/**
1117 * mv_qc_prep - Host specific command preparation.
1118 * @qc: queued command to prepare
1119 *
1120 * This routine simply redirects to the general purpose routine
1121 * if command is not DMA. Else, it handles prep of the CRQB
1122 * (command request block), does some sanity checking, and calls
1123 * the SG load routine.
1124 *
1125 * LOCKING:
1126 * Inherited from caller.
1127 */
Brett Russ31961942005-09-30 01:36:00 -04001128static void mv_qc_prep(struct ata_queued_cmd *qc)
1129{
1130 struct ata_port *ap = qc->ap;
1131 struct mv_port_priv *pp = ap->private_data;
Mark Lorde1469872006-05-22 19:02:03 -04001132 __le16 *cw;
Brett Russ31961942005-09-30 01:36:00 -04001133 struct ata_taskfile *tf;
1134 u16 flags = 0;
Mark Lorda6432432006-05-19 16:36:36 -04001135 unsigned in_index;
Brett Russ31961942005-09-30 01:36:00 -04001136
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001137 if (qc->tf.protocol != ATA_PROT_DMA)
Brett Russ31961942005-09-30 01:36:00 -04001138 return;
Brett Russ20f733e2005-09-01 18:26:17 -04001139
Brett Russ31961942005-09-30 01:36:00 -04001140 /* Fill in command request block
1141 */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001142 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
Brett Russ31961942005-09-30 01:36:00 -04001143 flags |= CRQB_FLAG_READ;
Tejun Heobeec7db2006-02-11 19:11:13 +09001144 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
Brett Russ31961942005-09-30 01:36:00 -04001145 flags |= qc->tag << CRQB_TAG_SHIFT;
1146
Mark Lorda6432432006-05-19 16:36:36 -04001147 /* get current queue index from hardware */
1148 in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1149 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
Brett Russ31961942005-09-30 01:36:00 -04001150
Mark Lorda6432432006-05-19 16:36:36 -04001151 pp->crqb[in_index].sg_addr =
1152 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1153 pp->crqb[in_index].sg_addr_hi =
1154 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1155 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1156
1157 cw = &pp->crqb[in_index].ata_cmd[0];
Brett Russ31961942005-09-30 01:36:00 -04001158 tf = &qc->tf;
1159
1160 /* Sadly, the CRQB cannot accomodate all registers--there are
1161 * only 11 bytes...so we must pick and choose required
1162 * registers based on the command. So, we drop feature and
1163 * hob_feature for [RW] DMA commands, but they are needed for
1164 * NCQ. NCQ will drop hob_nsect.
1165 */
1166 switch (tf->command) {
1167 case ATA_CMD_READ:
1168 case ATA_CMD_READ_EXT:
1169 case ATA_CMD_WRITE:
1170 case ATA_CMD_WRITE_EXT:
Jens Axboec15d85c2006-02-15 15:59:25 +01001171 case ATA_CMD_WRITE_FUA_EXT:
Brett Russ31961942005-09-30 01:36:00 -04001172 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1173 break;
1174#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1175 case ATA_CMD_FPDMA_READ:
1176 case ATA_CMD_FPDMA_WRITE:
Jeff Garzik8b260242005-11-12 12:32:50 -05001177 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
Brett Russ31961942005-09-30 01:36:00 -04001178 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1179 break;
1180#endif /* FIXME: remove this line when NCQ added */
1181 default:
1182 /* The only other commands EDMA supports in non-queued and
1183 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1184 * of which are defined/used by Linux. If we get here, this
1185 * driver needs work.
1186 *
1187 * FIXME: modify libata to give qc_prep a return value and
1188 * return error here.
1189 */
1190 BUG_ON(tf->command);
1191 break;
1192 }
1193 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1194 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1195 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1196 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1197 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1198 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1199 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1200 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1201 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1202
Jeff Garzike4e7b892006-01-31 12:18:41 -05001203 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
Brett Russ31961942005-09-30 01:36:00 -04001204 return;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001205 mv_fill_sg(qc);
1206}
1207
/**
 * mv_qc_prep_iie - Host specific command preparation (Gen IIE chips).
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.  The Gen IIE CRQB carries the full taskfile
 * packed into four 32-bit words rather than per-register command
 * words.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* non-DMA protocols are issued via the generic path later */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	/* double shift keeps this valid for 32-bit dma_addr_t */
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1275
Brett Russ05b308e2005-10-05 17:08:53 -04001276/**
1277 * mv_qc_issue - Initiate a command to the host
1278 * @qc: queued command to start
1279 *
1280 * This routine simply redirects to the general purpose routine
1281 * if command is not DMA. Else, it sanity checks our local
1282 * caches of the request producer/consumer indices then enables
1283 * DMA and bumps the request producer index.
1284 *
1285 * LOCKING:
1286 * Inherited from caller.
1287 */
Tejun Heo9a3d9eb2006-01-23 13:09:36 +09001288static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001289{
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001290 struct ata_port *ap = qc->ap;
1291 void __iomem *port_mmio = mv_ap_base(ap);
1292 struct mv_port_priv *pp = ap->private_data;
1293 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lorda6432432006-05-19 16:36:36 -04001294 unsigned in_index;
Brett Russ31961942005-09-30 01:36:00 -04001295 u32 in_ptr;
1296
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001297 if (qc->tf.protocol != ATA_PROT_DMA) {
Brett Russ31961942005-09-30 01:36:00 -04001298 /* We're about to send a non-EDMA capable command to the
1299 * port. Turn off EDMA so there won't be problems accessing
1300 * shadow block, etc registers.
1301 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001302 mv_stop_dma(ap);
Brett Russ31961942005-09-30 01:36:00 -04001303 return ata_qc_issue_prot(qc);
1304 }
1305
Mark Lorda6432432006-05-19 16:36:36 -04001306 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1307 in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
Brett Russ31961942005-09-30 01:36:00 -04001308
Brett Russ31961942005-09-30 01:36:00 -04001309 /* until we do queuing, the queue should be empty at this point */
Mark Lorda6432432006-05-19 16:36:36 -04001310 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1311 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
Brett Russ31961942005-09-30 01:36:00 -04001312
Mark Lorda6432432006-05-19 16:36:36 -04001313 in_index = mv_inc_q_index(in_index); /* now incr producer index */
Brett Russ31961942005-09-30 01:36:00 -04001314
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001315 mv_start_dma(port_mmio, hpriv, pp);
Brett Russ31961942005-09-30 01:36:00 -04001316
1317 /* and write the request in pointer to kick the EDMA to life */
1318 in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
Mark Lorda6432432006-05-19 16:36:36 -04001319 in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
Brett Russ31961942005-09-30 01:36:00 -04001320 writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1321
1322 return 0;
1323}
1324
Brett Russ05b308e2005-10-05 17:08:53 -04001325/**
1326 * mv_get_crpb_status - get status from most recently completed cmd
1327 * @ap: ATA channel to manipulate
1328 *
1329 * This routine is for use when the port is in DMA mode, when it
1330 * will be using the CRPB (command response block) method of
Tejun Heobeec7db2006-02-11 19:11:13 +09001331 * returning command completion information. We check indices
Brett Russ05b308e2005-10-05 17:08:53 -04001332 * are good, grab status, and bump the response consumer index to
1333 * prove that we're up to date.
1334 *
1335 * LOCKING:
1336 * Inherited from caller.
1337 */
Brett Russ31961942005-09-30 01:36:00 -04001338static u8 mv_get_crpb_status(struct ata_port *ap)
1339{
1340 void __iomem *port_mmio = mv_ap_base(ap);
1341 struct mv_port_priv *pp = ap->private_data;
Mark Lorda6432432006-05-19 16:36:36 -04001342 unsigned out_index;
Brett Russ31961942005-09-30 01:36:00 -04001343 u32 out_ptr;
Mark Lord806a6e72006-03-21 21:11:53 -05001344 u8 ata_status;
Brett Russ31961942005-09-30 01:36:00 -04001345
Mark Lorda6432432006-05-19 16:36:36 -04001346 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1347 out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
Brett Russ31961942005-09-30 01:36:00 -04001348
Mark Lorda6432432006-05-19 16:36:36 -04001349 ata_status = le16_to_cpu(pp->crpb[out_index].flags)
1350 >> CRPB_FLAG_STATUS_SHIFT;
Mark Lord806a6e72006-03-21 21:11:53 -05001351
Brett Russ31961942005-09-30 01:36:00 -04001352 /* increment our consumer index... */
Mark Lorda6432432006-05-19 16:36:36 -04001353 out_index = mv_inc_q_index(out_index);
Jeff Garzik8b260242005-11-12 12:32:50 -05001354
Brett Russ31961942005-09-30 01:36:00 -04001355 /* and, until we do NCQ, there should only be 1 CRPB waiting */
Mark Lorda6432432006-05-19 16:36:36 -04001356 WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1357 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
Brett Russ31961942005-09-30 01:36:00 -04001358
1359 /* write out our inc'd consumer index so EDMA knows we're caught up */
1360 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
Mark Lorda6432432006-05-19 16:36:36 -04001361 out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
Brett Russ31961942005-09-30 01:36:00 -04001362 writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1363
1364 /* Return ATA status register for completed CRPB */
Mark Lord806a6e72006-03-21 21:11:53 -05001365 return ata_status;
Brett Russ20f733e2005-09-01 18:26:17 -04001366}
1367
Brett Russ05b308e2005-10-05 17:08:53 -04001368/**
1369 * mv_err_intr - Handle error interrupts on the port
1370 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001371 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001372 *
1373 * In most cases, just clear the interrupt and move on. However,
1374 * some cases require an eDMA reset, which is done right before
1375 * the COMRESET in mv_phy_reset(). The SERR case requires a
1376 * clear of pending errors in the SATA SERROR register. Finally,
1377 * if the port disabled DMA, update our cached copy to match.
1378 *
1379 * LOCKING:
1380 * Inherited from caller.
1381 */
Mark Lord9b358e32006-05-19 16:21:03 -04001382static void mv_err_intr(struct ata_port *ap, int reset_allowed)
Brett Russ20f733e2005-09-01 18:26:17 -04001383{
Brett Russ31961942005-09-30 01:36:00 -04001384 void __iomem *port_mmio = mv_ap_base(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001385 u32 edma_err_cause, serr = 0;
1386
Brett Russ20f733e2005-09-01 18:26:17 -04001387 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1388
1389 if (EDMA_ERR_SERR & edma_err_cause) {
Tejun Heo81952c52006-05-15 20:57:47 +09001390 sata_scr_read(ap, SCR_ERROR, &serr);
1391 sata_scr_write_flush(ap, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001392 }
Brett Russafb0edd2005-10-05 17:08:42 -04001393 if (EDMA_ERR_SELF_DIS & edma_err_cause) {
1394 struct mv_port_priv *pp = ap->private_data;
1395 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1396 }
1397 DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
Tejun Heo44877b42007-02-21 01:06:51 +09001398 "SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001399
1400 /* Clear EDMA now that SERR cleanup done */
1401 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1402
1403 /* check for fatal here and recover if needed */
Mark Lord9b358e32006-05-19 16:21:03 -04001404 if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
Jeff Garzikc9d39132005-11-13 17:47:51 -05001405 mv_stop_and_reset(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001406}
1407
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int port, port0;
	int shift, hard_port, handled;
	unsigned int err_mask;

	/* HC 0 owns ports 0..MV_PORTS_PER_HC-1, HC 1 the next group */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause)
		/* ack (write-clear) only the bits we actually saw */
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc,relevant,hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		struct ata_port *ap = host->ports[port];
		/* NOTE(review): ap is dereferenced here, but a NULL check
		 * on ap appears further below -- confirm whether ap can
		 * ever be NULL at this point.
		 */
		struct mv_port_priv *pp = ap->private_data;

		hard_port = mv_hardport_from_port(port); /* range 0..3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.
		 * The cause of this is still under investigation.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA: check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);
				handled = 1;
			}
		} else {
			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				ata_status = readb(ap->ioaddr.status_addr);
				handled = 1;
				/* ignore spurious intr if drive still BUSY */
				if (ata_status & ATA_BUSY) {
					ata_status = 0;
					handled = 0;
				}
			}
		}

		if (ap && (ap->flags & ATA_FLAG_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		/* each port has two bits in the main cause register ... */
		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			/* error bit set in the main cause reg for this port */
			mv_err_intr(ap, 1);
			err_mask |= AC_ERR_OTHER;
			handled = 1;
		}

		if (handled) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port,ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}
1508
/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	/* dispatch each HC's slice of the main cause register */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled++;
		}
	}

	hpriv = host->private_data;
	if (IS_60XX(hpriv)) {
		/* deal with the interrupt coalescing bits */
		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
		}
	}

	if (PCI_ERR & irq_stat) {
		/* PCI errors are only visible in the main cause register */
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1574
Jeff Garzikc9d39132005-11-13 17:47:51 -05001575static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1576{
1577 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1578 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1579
1580 return hc_mmio + ofs;
1581}
1582
1583static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1584{
1585 unsigned int ofs;
1586
1587 switch (sc_reg_in) {
1588 case SCR_STATUS:
1589 case SCR_ERROR:
1590 case SCR_CONTROL:
1591 ofs = sc_reg_in * sizeof(u32);
1592 break;
1593 default:
1594 ofs = 0xffffffffU;
1595 break;
1596 }
1597 return ofs;
1598}
1599
1600static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1601{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001602 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1603 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001604 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1605
1606 if (ofs != 0xffffffffU)
Tejun Heo0d5ff562007-02-01 15:06:36 +09001607 return readl(addr + ofs);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001608 else
1609 return (u32) ofs;
1610}
1611
1612static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1613{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001614 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1615 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001616 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1617
1618 if (ofs != 0xffffffffU)
Tejun Heo0d5ff562007-02-01 15:06:36 +09001619 writelfl(val, addr + ofs);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001620}
1621
Jeff Garzik522479f2005-11-12 22:14:02 -05001622static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1623{
1624 u8 rev_id;
1625 int early_5080;
1626
1627 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
1628
1629 early_5080 = (pdev->device == 0x5080) && (rev_id == 0);
1630
1631 if (!early_5080) {
1632 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1633 tmp |= (1 << 0);
1634 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1635 }
1636
1637 mv_reset_pci_bus(pdev, mmio);
1638}
1639
1640static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1641{
1642 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1643}
1644
Jeff Garzik47c2b672005-11-12 21:13:17 -05001645static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001646 void __iomem *mmio)
1647{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001648 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1649 u32 tmp;
1650
1651 tmp = readl(phy_mmio + MV5_PHY_MODE);
1652
1653 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1654 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001655}
1656
/* 50xx LED-enable hook: clears GPIO port control, then modifies the
 * expansion-ROM BAR control register.
 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit EXCEPT bit 0.
	 * mv5_reset_bus() uses "|= (1 << 0)" on the same register, so
	 * "&= ~(1 << 0)" (clear bit 0) may have been intended here.
	 * This is long-standing behavior -- confirm against the chip
	 * documentation before changing the write.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1669
/* Apply 50xx PHY errata workarounds for @port and restore the saved
 * pre-emphasis / amplitude settings into the PHY mode register.
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* pre (bits 12:11) and amps (bits 7:6:5) fields to be replaced */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* 50XXB0 workaround: tweak LT mode and PHY control */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* merge the saved signal settings into the PHY mode register */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1695
Jeff Garzikc9d39132005-11-13 17:47:51 -05001696
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Reset one 50xx port: disable eDMA, reset the channel, then zero the
 * port's eDMA register block and program defaults.  Register write
 * order follows the vendor driver.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1723
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one 50xx host controller's shared registers: zero the
 * 0x0c-0x18 block, then rewrite register 0x20 keeping only the
 * 0x1c1c1c1c bits and forcing 0x03030303 on.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1742
1743static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1744 unsigned int n_hc)
1745{
1746 unsigned int hc, port;
1747
1748 for (hc = 0; hc < n_hc; hc++) {
1749 for (port = 0; port < MV_PORTS_PER_HC; port++)
1750 mv5_reset_hc_port(hpriv, mmio,
1751 (hc * MV_PORTS_PER_HC) + port);
1752
1753 mv5_reset_one_hc(hpriv, mmio, hc);
1754 }
1755
1756 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001757}
1758
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Common PCI-side reset: mask the PCI mode register, then clear the
 * timers, triggers, masks and error-latch registers.
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;	/* clear bits 23:16 */
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1782
1783static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1784{
1785 u32 tmp;
1786
1787 mv5_reset_flash(hpriv, mmio);
1788
1789 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1790 tmp &= 0x3;
1791 tmp |= (1 << 5) | (1 << 6);
1792 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1793}
1794
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @hpriv: host private data (unused here)
 * @mmio: base address of the HBA
 * @n_hc: number of host controllers (unused here)
 *
 * This routine only applies to 6xxx parts.  Returns 0 on success,
 * 1 on failure.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
1859
Jeff Garzik47c2b672005-11-12 21:13:17 -05001860static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001861 void __iomem *mmio)
1862{
1863 void __iomem *port_mmio;
1864 u32 tmp;
1865
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001866 tmp = readl(mmio + MV_RESET_CFG);
1867 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05001868 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001869 hpriv->signal[idx].pre = 0x1 << 5;
1870 return;
1871 }
1872
1873 port_mmio = mv_port_base(mmio, idx);
1874 tmp = readl(port_mmio + PHY_MODE2);
1875
1876 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
1877 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
1878}
1879
Jeff Garzik47c2b672005-11-12 21:13:17 -05001880static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001881{
Jeff Garzik47c2b672005-11-12 21:13:17 -05001882 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001883}
1884
/* Apply 60xx PHY errata workarounds for @port, then restore the saved
 * pre-emphasis / amplitude settings into PHY_MODE2.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 while bit 16 is held clear */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		/* NOTE(review): (1 << 31) on a signed int is formally
		 * undefined; (1U << 31) would be the strictly-correct
		 * spelling.  Behavior is unchanged on the targeted
		 * compilers.
		 */
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* on 60X1B2 parts, save register 0x310 across the update */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
1950
/* Hard-reset one SATA channel via the eDMA command register, apply the
 * per-chip PHY errata hook, and honor chip-specific settle delays.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		/* 60xx: program the SATA interface control register */
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_50XX(hpriv))
		mdelay(1);	/* 50xx parts need an extra settle delay */
}
1977
1978static void mv_stop_and_reset(struct ata_port *ap)
1979{
Jeff Garzikcca39742006-08-24 03:19:22 -04001980 struct mv_host_priv *hpriv = ap->host->private_data;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001981 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Jeff Garzikc9d39132005-11-13 17:47:51 -05001982
1983 mv_stop_dma(ap);
1984
1985 mv_channel_reset(hpriv, mmio, ap->port_no);
1986
Jeff Garzik22374672005-11-17 10:59:48 -05001987 __mv_phy_reset(ap, 0);
1988}
1989
/* Delay for @msec milliseconds; busy-wait when sleeping is not allowed. */
static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (!can_sleep)
		mdelay(msec);
	else
		msleep(msec);
}
1997
/**
 * __mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 * @can_sleep: non-zero when msleep() may be used for delays;
 *             zero forces mdelay() busy-waits (interrupt context)
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * LOCKING:
 * Inherited from caller.  This is coded to safe to call at
 * interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long deadline;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	/* 0x301: assert COMRESET (DET=1); 0x300: release it (DET=0) */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	/* poll SStatus DET for up to 200ms waiting for a settled link */
	deadline = jiffies + msecs_to_jiffies(200);
	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_online(ap)) {
		ata_port_probe(ap);
	} else {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		ata_port_printk(ap, KERN_INFO,
				"no device found (phy stat %08x)\n", sstatus);
		ata_port_disable(ap);
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
		if (retry-- <= 0)
			break;
	}

	/* read the device signature from the shadow registers */
	tf.lbah = readb(ap->ioaddr.lbah_addr);
	tf.lbam = readb(ap->ioaddr.lbam_addr);
	tf.lbal = readb(ap->ioaddr.lbal_addr);
	tf.nsect = readb(ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_enabled(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	/* clear any latched eDMA errors and mark eDMA as stopped */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}
2095
/* Sleeping-context wrapper around __mv_phy_reset(). */
static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, /* can_sleep */ 1);
}
2100
/**
 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
 * @ap: ATA channel to manipulate
 *
 * Intent is to clear all pending error conditions, reset the
 * chip/bus, fail the command, and move on.
 *
 * LOCKING:
 * This routine holds the host lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	struct ata_queued_cmd *qc;
	unsigned long flags;

	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));

	qc = ata_qc_from_tag(ap, ap->active_tag);
	/* NOTE(review): qc is dereferenced here and below without a NULL
	 * check, unlike mv_host_intr() which checks "qc &&" -- confirm
	 * that a timed-out port always has an active qc.
	 */
	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
	       mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);

	/* clear errors and reset the channel under the host lock */
	spin_lock_irqsave(&ap->host->lock, flags);
	mv_err_intr(ap, 0);
	mv_stop_and_reset(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
	if (qc->flags & ATA_QCFLAG_ACTIVE) {
		qc->err_mask |= AC_ERR_TIMEOUT;
		ata_eh_qc_complete(qc);
	}
}
2136
Brett Russ05b308e2005-10-05 17:08:53 -04002137/**
2138 * mv_port_init - Perform some early initialization on a single port.
2139 * @port: libata data structure storing shadow register addresses
2140 * @port_mmio: base address of the port
2141 *
2142 * Initialize shadow register mmio addresses, clear outstanding
2143 * interrupts on the port, and unmask interrupts for the future
2144 * start of the port.
2145 *
2146 * LOCKING:
2147 * Inherited from caller.
2148 */
Brett Russ31961942005-09-30 01:36:00 -04002149static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2150{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002151 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
Brett Russ31961942005-09-30 01:36:00 -04002152 unsigned serr_ofs;
2153
Jeff Garzik8b260242005-11-12 12:32:50 -05002154 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04002155 */
2156 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05002157 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04002158 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2159 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2160 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2161 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2162 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2163 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05002164 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04002165 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2166 /* special case: control/altstatus doesn't have ATA_REG_ address */
2167 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2168
2169 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08002170 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e2005-09-01 18:26:17 -04002171
Brett Russ31961942005-09-30 01:36:00 -04002172 /* Clear any currently outstanding port interrupt conditions */
2173 serr_ofs = mv_scr_offset(SCR_ERROR);
2174 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2175 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2176
Brett Russ20f733e2005-09-01 18:26:17 -04002177 /* unmask all EDMA error interrupts */
Brett Russ31961942005-09-30 01:36:00 -04002178 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002179
Jeff Garzik8b260242005-11-12 12:32:50 -05002180 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Brett Russ31961942005-09-30 01:36:00 -04002181 readl(port_mmio + EDMA_CFG_OFS),
2182 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2183 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
Brett Russ20f733e2005-09-01 18:26:17 -04002184}
2185
Tejun Heo4447d352007-04-17 23:44:08 +09002186static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002187{
Tejun Heo4447d352007-04-17 23:44:08 +09002188 struct pci_dev *pdev = to_pci_dev(host->dev);
2189 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002190 u8 rev_id;
2191 u32 hp_flags = hpriv->hp_flags;
2192
2193 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2194
2195 switch(board_idx) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002196 case chip_5080:
2197 hpriv->ops = &mv5xxx_ops;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002198 hp_flags |= MV_HP_50XX;
2199
Jeff Garzik47c2b672005-11-12 21:13:17 -05002200 switch (rev_id) {
2201 case 0x1:
2202 hp_flags |= MV_HP_ERRATA_50XXB0;
2203 break;
2204 case 0x3:
2205 hp_flags |= MV_HP_ERRATA_50XXB2;
2206 break;
2207 default:
2208 dev_printk(KERN_WARNING, &pdev->dev,
2209 "Applying 50XXB2 workarounds to unknown rev\n");
2210 hp_flags |= MV_HP_ERRATA_50XXB2;
2211 break;
2212 }
2213 break;
2214
2215 case chip_504x:
2216 case chip_508x:
2217 hpriv->ops = &mv5xxx_ops;
2218 hp_flags |= MV_HP_50XX;
2219
2220 switch (rev_id) {
2221 case 0x0:
2222 hp_flags |= MV_HP_ERRATA_50XXB0;
2223 break;
2224 case 0x3:
2225 hp_flags |= MV_HP_ERRATA_50XXB2;
2226 break;
2227 default:
2228 dev_printk(KERN_WARNING, &pdev->dev,
2229 "Applying B2 workarounds to unknown rev\n");
2230 hp_flags |= MV_HP_ERRATA_50XXB2;
2231 break;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002232 }
2233 break;
2234
2235 case chip_604x:
2236 case chip_608x:
Jeff Garzik47c2b672005-11-12 21:13:17 -05002237 hpriv->ops = &mv6xxx_ops;
2238
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002239 switch (rev_id) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002240 case 0x7:
2241 hp_flags |= MV_HP_ERRATA_60X1B2;
2242 break;
2243 case 0x9:
2244 hp_flags |= MV_HP_ERRATA_60X1C0;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002245 break;
2246 default:
2247 dev_printk(KERN_WARNING, &pdev->dev,
Jeff Garzik47c2b672005-11-12 21:13:17 -05002248 "Applying B2 workarounds to unknown rev\n");
2249 hp_flags |= MV_HP_ERRATA_60X1B2;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002250 break;
2251 }
2252 break;
2253
Jeff Garzike4e7b892006-01-31 12:18:41 -05002254 case chip_7042:
2255 case chip_6042:
2256 hpriv->ops = &mv6xxx_ops;
2257
2258 hp_flags |= MV_HP_GEN_IIE;
2259
2260 switch (rev_id) {
2261 case 0x0:
2262 hp_flags |= MV_HP_ERRATA_XX42A0;
2263 break;
2264 case 0x1:
2265 hp_flags |= MV_HP_ERRATA_60X1C0;
2266 break;
2267 default:
2268 dev_printk(KERN_WARNING, &pdev->dev,
2269 "Applying 60X1C0 workarounds to unknown rev\n");
2270 hp_flags |= MV_HP_ERRATA_60X1C0;
2271 break;
2272 }
2273 break;
2274
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002275 default:
2276 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2277 return 1;
2278 }
2279
2280 hpriv->hp_flags = hp_flags;
2281
2282 return 0;
2283}
2284
/**
 *	mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *	@board_idx: controller index (selects chip family / errata handling)
 *
 *	If possible, do an early global reset of the host.  Then do
 *	our port init and clear/unmask all/relevant host interrupts.
 *	The ordering of the hardware accesses below matters: interrupts
 *	are masked first, the host controller is reset before per-port
 *	setup, and interrupts are only unmasked at the very end.
 *
 *	Returns 0 on success, nonzero (from mv_chip_id/reset_hc) on failure.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask: silence everything before touching hardware */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	/* identify chip stepping and select the per-family ops vector */
	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* sample per-port PHY preamp values before the controller reset */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			/* Gen-II parts: force interface control per chip spec */
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		/* apply stepping-specific PHY errata workarounds */
		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	/* set up taskfile addressing and clear per-port interrupt state */
	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	/* Gen-I (50xx) chips use a smaller main IRQ mask than later parts */
	if (IS_50XX(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
2374
Brett Russ05b308e2005-10-05 17:08:53 -04002375/**
2376 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002377 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002378 *
2379 * FIXME: complete this.
2380 *
2381 * LOCKING:
2382 * Inherited from caller.
2383 */
Tejun Heo4447d352007-04-17 23:44:08 +09002384static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002385{
Tejun Heo4447d352007-04-17 23:44:08 +09002386 struct pci_dev *pdev = to_pci_dev(host->dev);
2387 struct mv_host_priv *hpriv = host->private_data;
Brett Russ31961942005-09-30 01:36:00 -04002388 u8 rev_id, scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002389 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002390
2391 /* Use this to determine the HW stepping of the chip so we know
2392 * what errata to workaround
2393 */
2394 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2395
2396 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2397 if (scc == 0)
2398 scc_s = "SCSI";
2399 else if (scc == 0x01)
2400 scc_s = "RAID";
2401 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002402 scc_s = "?";
2403
2404 if (IS_GEN_I(hpriv))
2405 gen = "I";
2406 else if (IS_GEN_II(hpriv))
2407 gen = "II";
2408 else if (IS_GEN_IIE(hpriv))
2409 gen = "IIE";
2410 else
2411 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002412
Jeff Garzika9524a72005-10-30 14:39:11 -05002413 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002414 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2415 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002416 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2417}
2418
Brett Russ05b308e2005-10-05 17:08:53 -04002419/**
2420 * mv_init_one - handle a positive probe of a Marvell host
2421 * @pdev: PCI device found
2422 * @ent: PCI device ID entry for the matched host
2423 *
2424 * LOCKING:
2425 * Inherited from caller.
2426 */
Brett Russ20f733e2005-09-01 18:26:17 -04002427static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2428{
2429 static int printed_version = 0;
Brett Russ20f733e2005-09-01 18:26:17 -04002430 unsigned int board_idx = (unsigned int)ent->driver_data;
Tejun Heo4447d352007-04-17 23:44:08 +09002431 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2432 struct ata_host *host;
2433 struct mv_host_priv *hpriv;
2434 int n_ports, rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002435
Jeff Garzika9524a72005-10-30 14:39:11 -05002436 if (!printed_version++)
2437 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
Brett Russ20f733e2005-09-01 18:26:17 -04002438
Tejun Heo4447d352007-04-17 23:44:08 +09002439 /* allocate host */
2440 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2441
2442 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2443 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2444 if (!host || !hpriv)
2445 return -ENOMEM;
2446 host->private_data = hpriv;
2447
2448 /* acquire resources */
Tejun Heo24dc5f32007-01-20 16:00:28 +09002449 rc = pcim_enable_device(pdev);
2450 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04002451 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002452
Tejun Heo0d5ff562007-02-01 15:06:36 +09002453 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2454 if (rc == -EBUSY)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002455 pcim_pin_device(pdev);
Tejun Heo0d5ff562007-02-01 15:06:36 +09002456 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002457 return rc;
Tejun Heo4447d352007-04-17 23:44:08 +09002458 host->iomap = pcim_iomap_table(pdev);
Brett Russ20f733e2005-09-01 18:26:17 -04002459
Jeff Garzikd88184f2007-02-26 01:26:06 -05002460 rc = pci_go_64(pdev);
2461 if (rc)
2462 return rc;
2463
Brett Russ20f733e2005-09-01 18:26:17 -04002464 /* initialize adapter */
Tejun Heo4447d352007-04-17 23:44:08 +09002465 rc = mv_init_host(host, board_idx);
Tejun Heo24dc5f32007-01-20 16:00:28 +09002466 if (rc)
2467 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002468
Brett Russ31961942005-09-30 01:36:00 -04002469 /* Enable interrupts */
Tejun Heo6a59dcf2007-02-24 15:12:31 +09002470 if (msi && pci_enable_msi(pdev))
Brett Russ31961942005-09-30 01:36:00 -04002471 pci_intx(pdev, 1);
Brett Russ20f733e2005-09-01 18:26:17 -04002472
Brett Russ31961942005-09-30 01:36:00 -04002473 mv_dump_pci_cfg(pdev, 0x68);
Tejun Heo4447d352007-04-17 23:44:08 +09002474 mv_print_info(host);
Brett Russ20f733e2005-09-01 18:26:17 -04002475
Tejun Heo4447d352007-04-17 23:44:08 +09002476 pci_set_master(pdev);
2477 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002478 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
Brett Russ20f733e2005-09-01 18:26:17 -04002479}
2480
/* Module entry point: register the PCI driver with the PCI core. */
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}
2485
/* Module exit point: unregister the PCI driver. */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2490
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* "msi" module parameter: world-readable (0444), not writable at runtime */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);