sata_mv.c revision 68d1d07b510bb57a504588adc2bd2758adea0965
1/*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc.  All rights reserved.
6 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 *
22 */
23
24/*
25  sata_mv TODO list:
26
27  1) Needs a full errata audit for all chipsets.  I implemented most
28  of the errata workarounds found in the Marvell vendor driver, but
29  I distinctly remember that a couple of workarounds (one related
30  to PCI-X) are still needed.
31
32  2) Improve/fix IRQ and error handling sequences.
33
34  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
35
36  4) Think about TCQ support here, and for libata in general
37  with controllers that support it via host-queuing hardware
38  (a software-only implementation could be a nightmare).
39
40  5) Investigate problems with PCI Message Signalled Interrupts (MSI).
41
42  6) Add port multiplier support (intermediate)
43
44  7) Develop a low-power-consumption strategy, and implement it.
45
46  8) [Experiment, low priority] See if ATAPI can be supported using
47  "unknown FIS" or "vendor-specific FIS" support, or something creative
48  like that.
49
50  9) [Experiment, low priority] Investigate interrupt coalescing.
51  Quite often, especially with PCI Message Signalled Interrupts (MSI),
52  the overhead saved by interrupt mitigation is not worth the added
53  latency cost.
54
55  10) [Experiment, Marvell value added] Is it possible to use target
56  mode to cross-connect two Linux boxes with Marvell cards?  If so,
57  creating LibATA target mode support would be very interesting.
58
59  Target mode, for those without docs, is the ability to directly
60  connect two SATA controllers.
61
62*/
63
64
65#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/init.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
72#include <linux/dmapool.h>
73#include <linux/dma-mapping.h>
74#include <linux/device.h>
75#include <linux/platform_device.h>
76#include <linux/ata_platform.h>
77#include <scsi/scsi_host.h>
78#include <scsi/scsi_cmnd.h>
79#include <scsi/scsi_device.h>
80#include <linux/libata.h>
81
82#define DRV_NAME	"sata_mv"
83#define DRV_VERSION	"1.20"
84
85enum {
86	/* BARs are enumerated in pci_resource_start() terms */
87	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
88	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
89	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */
90
91	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
92	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */
93
94	MV_PCI_REG_BASE		= 0,
95	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
96	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
97	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
98	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
99	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
100	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),
101
102	MV_SATAHC0_REG_BASE	= 0x20000,
103	MV_FLASH_CTL		= 0x1046c,
104	MV_GPIO_PORT_CTL	= 0x104f0,
105	MV_RESET_CFG		= 0x180d8,
106
107	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
108	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
109	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
110	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
111
112	MV_MAX_Q_DEPTH		= 32,
113	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
114
115	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
116	 * CRPB needs alignment on a 256B boundary. Size == 256B
117	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
118	 */
119	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
120	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
121	MV_MAX_SG_CT		= 256,
122	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
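	/*
	 * With MV_MAX_Q_DEPTH == 32 the sizes above work out to:
	 * MV_CRQB_Q_SZ = 32 * 32 = 1024 bytes (matching the 1KB CRQB
	 * alignment noted above), MV_CRPB_Q_SZ = 8 * 32 = 256 bytes,
	 * and MV_SG_TBL_SZ = 16 * 256 = 4096 bytes per tag.
	 */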
123
124	MV_PORTS_PER_HC		= 4,
125	/* == (port / MV_PORTS_PER_HC) to determine HC from a 0-7 port number */
126	MV_PORT_HC_SHIFT	= 2,
127	/* == (port % MV_PORTS_PER_HC) to determine hard port from a 0-7 port number */
128	MV_PORT_MASK		= 3,
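	/*
	 * For example, port 6 maps to HC (6 >> MV_PORT_HC_SHIFT) = 1 and
	 * hard port (6 & MV_PORT_MASK) = 2; see mv_hc_from_port() and
	 * mv_hardport_from_port() below.
	 */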
129
130	/* Host Flags */
131	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
132	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
133	/* SoC integrated controllers, no PCI interface */
134	MV_FLAG_SOC = (1 << 28),
135
136	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
137				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138				  ATA_FLAG_PIO_POLLING,
139	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
140
141	CRQB_FLAG_READ		= (1 << 0),
142	CRQB_TAG_SHIFT		= 1,
143	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
144	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
145	CRQB_CMD_ADDR_SHIFT	= 8,
146	CRQB_CMD_CS		= (0x2 << 11),
147	CRQB_CMD_LAST		= (1 << 15),
148
149	CRPB_FLAG_STATUS_SHIFT	= 8,
150	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
151	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
152
153	EPRD_FLAG_END_OF_TBL	= (1 << 31),
154
155	/* PCI interface registers */
156
157	PCI_COMMAND_OFS		= 0xc00,
158
159	PCI_MAIN_CMD_STS_OFS	= 0xd30,
160	STOP_PCI_MASTER		= (1 << 2),
161	PCI_MASTER_EMPTY	= (1 << 3),
162	GLOB_SFT_RST		= (1 << 4),
163
164	MV_PCI_MODE		= 0xd00,
165	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
166	MV_PCI_DISC_TIMER	= 0xd04,
167	MV_PCI_MSI_TRIGGER	= 0xc38,
168	MV_PCI_SERR_MASK	= 0xc28,
169	MV_PCI_XBAR_TMOUT	= 0x1d04,
170	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
171	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
172	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
173	MV_PCI_ERR_COMMAND	= 0x1d50,
174
175	PCI_IRQ_CAUSE_OFS	= 0x1d58,
176	PCI_IRQ_MASK_OFS	= 0x1d5c,
177	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
178
179	PCIE_IRQ_CAUSE_OFS	= 0x1900,
180	PCIE_IRQ_MASK_OFS	= 0x1910,
181	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
182
183	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
184	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
185	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
186	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
187	PORT0_ERR		= (1 << 0),	/* shift by port # */
188	PORT0_DONE		= (1 << 1),	/* shift by port # */
189	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
190	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
191	PCI_ERR			= (1 << 18),
192	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
193	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
194	PORTS_0_3_COAL_DONE	= (1 << 8),
195	PORTS_4_7_COAL_DONE	= (1 << 17),
196	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
197	GPIO_INT		= (1 << 22),
198	SELF_INT		= (1 << 23),
199	TWSI_INT		= (1 << 24),
200	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
201	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
202	HC_MAIN_RSVD_SOC 	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */
203	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
204				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
205				   HC_MAIN_RSVD),
206	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
207				   HC_MAIN_RSVD_5),
208	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
209
210	/* SATAHC registers */
211	HC_CFG_OFS		= 0,
212
213	HC_IRQ_CAUSE_OFS	= 0x14,
214	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
215	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
216	DEV_IRQ			= (1 << 8),	/* shift by port # */
217
218	/* Shadow block registers */
219	SHD_BLK_OFS		= 0x100,
220	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */
221
222	/* SATA registers */
223	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
224	SATA_ACTIVE_OFS		= 0x350,
225	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
226	PHY_MODE3		= 0x310,
227	PHY_MODE4		= 0x314,
228	PHY_MODE2		= 0x330,
229	MV5_PHY_MODE		= 0x74,
230	MV5_LT_MODE		= 0x30,
231	MV5_PHY_CTL		= 0x0C,
232	SATA_INTERFACE_CTL	= 0x050,
233
234	MV_M2_PREAMP_MASK	= 0x7e0,
235
236	/* Port registers */
237	EDMA_CFG_OFS		= 0,
238	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
239	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
240	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
241	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
242	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
243
244	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
245	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
246	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
247	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
248	EDMA_ERR_DEV		= (1 << 2),	/* device error */
249	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
250	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
251	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
252	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
253	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
254	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
255	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
256	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
257	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
258	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
259	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
260
261	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
262	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
263	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
264	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
265	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */
266
267	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
268
269	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
270	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
271	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
272	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
273	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
274	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */
275
276	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
277
278	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
279	EDMA_ERR_OVERRUN_5	= (1 << 5),
280	EDMA_ERR_UNDERRUN_5	= (1 << 6),
281
282	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
283				  EDMA_ERR_LNK_CTRL_RX_1 |
284				  EDMA_ERR_LNK_CTRL_RX_3 |
285				  EDMA_ERR_LNK_CTRL_TX,
286
287	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
288				  EDMA_ERR_PRD_PAR |
289				  EDMA_ERR_DEV_DCON |
290				  EDMA_ERR_DEV_CON |
291				  EDMA_ERR_SERR |
292				  EDMA_ERR_SELF_DIS |
293				  EDMA_ERR_CRQB_PAR |
294				  EDMA_ERR_CRPB_PAR |
295				  EDMA_ERR_INTRL_PAR |
296				  EDMA_ERR_IORDY |
297				  EDMA_ERR_LNK_CTRL_RX_2 |
298				  EDMA_ERR_LNK_DATA_RX |
299				  EDMA_ERR_LNK_DATA_TX |
300				  EDMA_ERR_TRANS_PROTO,
301	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
302				  EDMA_ERR_PRD_PAR |
303				  EDMA_ERR_DEV_DCON |
304				  EDMA_ERR_DEV_CON |
305				  EDMA_ERR_OVERRUN_5 |
306				  EDMA_ERR_UNDERRUN_5 |
307				  EDMA_ERR_SELF_DIS_5 |
308				  EDMA_ERR_CRQB_PAR |
309				  EDMA_ERR_CRPB_PAR |
310				  EDMA_ERR_INTRL_PAR |
311				  EDMA_ERR_IORDY,
312
313	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
314	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
315
316	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
317	EDMA_REQ_Q_PTR_SHIFT	= 5,
318
319	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
320	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
321	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
322	EDMA_RSP_Q_PTR_SHIFT	= 3,
323
324	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
325	EDMA_EN			= (1 << 0),	/* enable EDMA */
326	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
327	ATA_RST			= (1 << 2),	/* reset trans/link/phy */
328
329	EDMA_IORDY_TMOUT	= 0x34,
330	EDMA_ARB_CFG		= 0x38,
331
332	/* Host private flags (hp_flags) */
333	MV_HP_FLAG_MSI		= (1 << 0),
334	MV_HP_ERRATA_50XXB0	= (1 << 1),
335	MV_HP_ERRATA_50XXB2	= (1 << 2),
336	MV_HP_ERRATA_60X1B2	= (1 << 3),
337	MV_HP_ERRATA_60X1C0	= (1 << 4),
338	MV_HP_ERRATA_XX42A0	= (1 << 5),
339	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
340	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
341	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
342	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
343
344	/* Port private flags (pp_flags) */
345	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
346	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
347	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
348};
349
350#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
351#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
352#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
353#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
354
355enum {
356	/* DMA boundary 0xffff is required by the s/g splitting
357	 * we need on /length/ in mv_fill_sg().
358	 */
359	MV_DMA_BOUNDARY		= 0xffffU,
360
361	/* mask of register bits containing lower 32 bits
362	 * of EDMA request queue DMA address
363	 */
364	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
365
366	/* ditto, for response queue */
367	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
368};
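/*
 * The BASE_LO masks above follow from the alignment guarantees: the CRQB
 * queue is 1KB aligned, so bits 9:0 of its DMA address are always zero
 * (hence 0xfffffc00), and the CRPB queue is 256B aligned, so bits 7:0 are
 * zero (hence 0xffffff00).  mv_set_edma_ptrs() relies on this when it
 * packs the base address and the ring index into one register.
 */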
369
370enum chip_type {
371	chip_504x,
372	chip_508x,
373	chip_5080,
374	chip_604x,
375	chip_608x,
376	chip_6042,
377	chip_7042,
378	chip_soc,
379};
380
381/* Command ReQuest Block: 32B */
382struct mv_crqb {
383	__le32			sg_addr;
384	__le32			sg_addr_hi;
385	__le16			ctrl_flags;
386	__le16			ata_cmd[11];
387};
388
389struct mv_crqb_iie {
390	__le32			addr;
391	__le32			addr_hi;
392	__le32			flags;
393	__le32			len;
394	__le32			ata_cmd[4];
395};
396
397/* Command ResPonse Block: 8B */
398struct mv_crpb {
399	__le16			id;
400	__le16			flags;
401	__le32			tmstmp;
402};
403
404/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
405struct mv_sg {
406	__le32			addr;
407	__le32			flags_size;
408	__le32			addr_hi;
409	__le32			reserved;
410};
411
412struct mv_port_priv {
413	struct mv_crqb		*crqb;
414	dma_addr_t		crqb_dma;
415	struct mv_crpb		*crpb;
416	dma_addr_t		crpb_dma;
417	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
418	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];
419
420	unsigned int		req_idx;
421	unsigned int		resp_idx;
422
423	u32			pp_flags;
424};
425
426struct mv_port_signal {
427	u32			amps;
428	u32			pre;
429};
430
431struct mv_host_priv {
432	u32			hp_flags;
433	struct mv_port_signal	signal[8];
434	const struct mv_hw_ops	*ops;
435	int			n_ports;
436	void __iomem		*base;
437	void __iomem		*main_cause_reg_addr;
438	void __iomem		*main_mask_reg_addr;
439	u32			irq_cause_ofs;
440	u32			irq_mask_ofs;
441	u32			unmask_all_irqs;
442	/*
443	 * These consistent DMA memory pools give us guaranteed
444	 * alignment for hardware-accessed data structures,
445	 * and less memory waste in accomplishing the alignment.
446	 */
447	struct dma_pool		*crqb_pool;
448	struct dma_pool		*crpb_pool;
449	struct dma_pool		*sg_tbl_pool;
450};
451
452struct mv_hw_ops {
453	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
454			   unsigned int port);
455	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
456	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
457			   void __iomem *mmio);
458	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
459			unsigned int n_hc);
460	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
461	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
462};
463
464static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
465static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
466static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
467static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
468static int mv_port_start(struct ata_port *ap);
469static void mv_port_stop(struct ata_port *ap);
470static void mv_qc_prep(struct ata_queued_cmd *qc);
471static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
472static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
473static void mv_error_handler(struct ata_port *ap);
474static void mv_eh_freeze(struct ata_port *ap);
475static void mv_eh_thaw(struct ata_port *ap);
476static void mv6_dev_config(struct ata_device *dev);
477
478static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
479			   unsigned int port);
480static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
481static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
482			   void __iomem *mmio);
483static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
484			unsigned int n_hc);
485static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
486static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
487
488static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
489			   unsigned int port);
490static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
491static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
492			   void __iomem *mmio);
493static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
494			unsigned int n_hc);
495static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
496static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
497				      void __iomem *mmio);
498static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
499				      void __iomem *mmio);
500static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
501				  void __iomem *mmio, unsigned int n_hc);
502static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
503				      void __iomem *mmio);
504static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
505static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
506static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
507			     unsigned int port_no);
508static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
509			void __iomem *port_mmio, int want_ncq);
510static int __mv_stop_dma(struct ata_port *ap);
511
512/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
513 * because we have to allow room for worst case splitting of
514 * PRDs for 64K boundaries in mv_fill_sg().
515 */
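/* Example of the worst case assumed here: mv_fill_sg() may split any
 * single SCSI segment that straddles a 64K boundary into two ePRD
 * entries, so advertising MV_MAX_SG_CT / 2 (= 128) segments guarantees
 * that the 256-entry ePRD table can never overflow.
 */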
516static struct scsi_host_template mv5_sht = {
517	ATA_BASE_SHT(DRV_NAME),
518	.sg_tablesize		= MV_MAX_SG_CT / 2,
519	.dma_boundary		= MV_DMA_BOUNDARY,
520};
521
522static struct scsi_host_template mv6_sht = {
523	ATA_NCQ_SHT(DRV_NAME),
524	.can_queue		= MV_MAX_Q_DEPTH - 1,
525	.sg_tablesize		= MV_MAX_SG_CT / 2,
526	.dma_boundary		= MV_DMA_BOUNDARY,
527};
528
529static const struct ata_port_operations mv5_ops = {
530	.tf_load		= ata_tf_load,
531	.tf_read		= ata_tf_read,
532	.check_status		= ata_check_status,
533	.exec_command		= ata_exec_command,
534	.dev_select		= ata_std_dev_select,
535
536	.qc_prep		= mv_qc_prep,
537	.qc_issue		= mv_qc_issue,
538	.data_xfer		= ata_data_xfer,
539
540	.irq_clear		= ata_noop_irq_clear,
541	.irq_on			= ata_irq_on,
542
543	.error_handler		= mv_error_handler,
544	.freeze			= mv_eh_freeze,
545	.thaw			= mv_eh_thaw,
546
547	.scr_read		= mv5_scr_read,
548	.scr_write		= mv5_scr_write,
549
550	.port_start		= mv_port_start,
551	.port_stop		= mv_port_stop,
552};
553
554static const struct ata_port_operations mv6_ops = {
555	.dev_config             = mv6_dev_config,
556	.tf_load		= ata_tf_load,
557	.tf_read		= ata_tf_read,
558	.check_status		= ata_check_status,
559	.exec_command		= ata_exec_command,
560	.dev_select		= ata_std_dev_select,
561
562	.qc_prep		= mv_qc_prep,
563	.qc_issue		= mv_qc_issue,
564	.data_xfer		= ata_data_xfer,
565
566	.irq_clear		= ata_noop_irq_clear,
567	.irq_on			= ata_irq_on,
568
569	.error_handler		= mv_error_handler,
570	.freeze			= mv_eh_freeze,
571	.thaw			= mv_eh_thaw,
572	.qc_defer		= ata_std_qc_defer,
573
574	.scr_read		= mv_scr_read,
575	.scr_write		= mv_scr_write,
576
577	.port_start		= mv_port_start,
578	.port_stop		= mv_port_stop,
579};
580
581static const struct ata_port_operations mv_iie_ops = {
582	.tf_load		= ata_tf_load,
583	.tf_read		= ata_tf_read,
584	.check_status		= ata_check_status,
585	.exec_command		= ata_exec_command,
586	.dev_select		= ata_std_dev_select,
587
588	.qc_prep		= mv_qc_prep_iie,
589	.qc_issue		= mv_qc_issue,
590	.data_xfer		= ata_data_xfer,
591
592	.irq_clear		= ata_noop_irq_clear,
593	.irq_on			= ata_irq_on,
594
595	.error_handler		= mv_error_handler,
596	.freeze			= mv_eh_freeze,
597	.thaw			= mv_eh_thaw,
598	.qc_defer		= ata_std_qc_defer,
599
600	.scr_read		= mv_scr_read,
601	.scr_write		= mv_scr_write,
602
603	.port_start		= mv_port_start,
604	.port_stop		= mv_port_stop,
605};
606
607static const struct ata_port_info mv_port_info[] = {
608	{  /* chip_504x */
609		.flags		= MV_COMMON_FLAGS,
610		.pio_mask	= 0x1f,	/* pio0-4 */
611		.udma_mask	= ATA_UDMA6,
612		.port_ops	= &mv5_ops,
613	},
614	{  /* chip_508x */
615		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
616		.pio_mask	= 0x1f,	/* pio0-4 */
617		.udma_mask	= ATA_UDMA6,
618		.port_ops	= &mv5_ops,
619	},
620	{  /* chip_5080 */
621		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
622		.pio_mask	= 0x1f,	/* pio0-4 */
623		.udma_mask	= ATA_UDMA6,
624		.port_ops	= &mv5_ops,
625	},
626	{  /* chip_604x */
627		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
628				  ATA_FLAG_NCQ,
629		.pio_mask	= 0x1f,	/* pio0-4 */
630		.udma_mask	= ATA_UDMA6,
631		.port_ops	= &mv6_ops,
632	},
633	{  /* chip_608x */
634		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
635				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
636		.pio_mask	= 0x1f,	/* pio0-4 */
637		.udma_mask	= ATA_UDMA6,
638		.port_ops	= &mv6_ops,
639	},
640	{  /* chip_6042 */
641		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
642				  ATA_FLAG_NCQ,
643		.pio_mask	= 0x1f,	/* pio0-4 */
644		.udma_mask	= ATA_UDMA6,
645		.port_ops	= &mv_iie_ops,
646	},
647	{  /* chip_7042 */
648		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
649				  ATA_FLAG_NCQ,
650		.pio_mask	= 0x1f,	/* pio0-4 */
651		.udma_mask	= ATA_UDMA6,
652		.port_ops	= &mv_iie_ops,
653	},
654	{  /* chip_soc */
655		.flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
656		.pio_mask = 0x1f,      /* pio0-4 */
657		.udma_mask = ATA_UDMA6,
658		.port_ops = &mv_iie_ops,
659	},
660};
661
662static const struct pci_device_id mv_pci_tbl[] = {
663	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
664	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
665	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
666	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
667	/* RocketRAID 1740/174x have different identifiers */
668	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
669	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },
670
671	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
672	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
673	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
674	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
675	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
676
677	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
678
679	/* Adaptec 1430SA */
680	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
681
682	/* Marvell 7042 support */
683	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
684
685	/* Highpoint RocketRAID PCIe series */
686	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
687	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
688
689	{ }			/* terminate list */
690};
691
692static const struct mv_hw_ops mv5xxx_ops = {
693	.phy_errata		= mv5_phy_errata,
694	.enable_leds		= mv5_enable_leds,
695	.read_preamp		= mv5_read_preamp,
696	.reset_hc		= mv5_reset_hc,
697	.reset_flash		= mv5_reset_flash,
698	.reset_bus		= mv5_reset_bus,
699};
700
701static const struct mv_hw_ops mv6xxx_ops = {
702	.phy_errata		= mv6_phy_errata,
703	.enable_leds		= mv6_enable_leds,
704	.read_preamp		= mv6_read_preamp,
705	.reset_hc		= mv6_reset_hc,
706	.reset_flash		= mv6_reset_flash,
707	.reset_bus		= mv_reset_pci_bus,
708};
709
710static const struct mv_hw_ops mv_soc_ops = {
711	.phy_errata		= mv6_phy_errata,
712	.enable_leds		= mv_soc_enable_leds,
713	.read_preamp		= mv_soc_read_preamp,
714	.reset_hc		= mv_soc_reset_hc,
715	.reset_flash		= mv_soc_reset_flash,
716	.reset_bus		= mv_soc_reset_bus,
717};
718
719/*
720 * Functions
721 */
722
723static inline void writelfl(unsigned long data, void __iomem *addr)
724{
725	writel(data, addr);
726	(void) readl(addr);	/* flush to avoid PCI posted write */
727}
728
729static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
730{
731	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
732}
733
734static inline unsigned int mv_hc_from_port(unsigned int port)
735{
736	return port >> MV_PORT_HC_SHIFT;
737}
738
739static inline unsigned int mv_hardport_from_port(unsigned int port)
740{
741	return port & MV_PORT_MASK;
742}
743
744static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
745						 unsigned int port)
746{
747	return mv_hc_base(base, mv_hc_from_port(port));
748}
749
750static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
751{
752	return  mv_hc_base_from_port(base, port) +
753		MV_SATAHC_ARBTR_REG_SZ +
754		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
755}
756
757static inline void __iomem *mv_host_base(struct ata_host *host)
758{
759	struct mv_host_priv *hpriv = host->private_data;
760	return hpriv->base;
761}
762
763static inline void __iomem *mv_ap_base(struct ata_port *ap)
764{
765	return mv_port_base(mv_host_base(ap->host), ap->port_no);
766}
767
768static inline int mv_get_hc_count(unsigned long port_flags)
769{
770	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
771}
772
773static void mv_set_edma_ptrs(void __iomem *port_mmio,
774			     struct mv_host_priv *hpriv,
775			     struct mv_port_priv *pp)
776{
777	u32 index;
778
779	/*
780	 * initialize request queue
781	 */
782	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
783
784	WARN_ON(pp->crqb_dma & 0x3ff);
785	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
786	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
787		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
788
789	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
790		writelfl((pp->crqb_dma & 0xffffffff) | index,
791			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
792	else
793		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
794
795	/*
796	 * initialize response queue
797	 */
798	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
799
800	WARN_ON(pp->crpb_dma & 0xff);
801	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
802
803	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
804		writelfl((pp->crpb_dma & 0xffffffff) | index,
805			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
806	else
807		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
808
809	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
810		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
811}
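/*
 * A note on the register layout used above: because of the alignment
 * guarantees, the BASE_LO bits and the ring index never overlap.  For the
 * request queue the index occupies bits 9:5 (EDMA_REQ_Q_PTR_SHIFT == 5,
 * depth 32) while the base address uses bits 31:10; e.g. req_idx == 7
 * yields an index field of 7 << 5 == 0xe0.  The response queue works the
 * same way, with bits 7:3 for the index.
 */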
812
813/**
814 *      mv_start_dma - Enable eDMA engine
815 *      @base: port base address
816 *      @pp: port private data
817 *
818 *      Verify the local cache of the eDMA state is accurate with a
819 *      WARN_ON.
820 *
821 *      LOCKING:
822 *      Inherited from caller.
823 */
824static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
825			 struct mv_port_priv *pp, u8 protocol)
826{
827	int want_ncq = (protocol == ATA_PROT_NCQ);
828
829	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
830		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
831		if (want_ncq != using_ncq)
832			__mv_stop_dma(ap);
833	}
834	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
835		struct mv_host_priv *hpriv = ap->host->private_data;
836		int hard_port = mv_hardport_from_port(ap->port_no);
837		void __iomem *hc_mmio = mv_hc_base_from_port(
838					mv_host_base(ap->host), hard_port);
839		u32 hc_irq_cause, ipending;
840
841		/* clear EDMA event indicators, if any */
842		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
843
844		/* clear EDMA interrupt indicator, if any */
845		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
846		ipending = (DEV_IRQ << hard_port) |
847				(CRPB_DMA_DONE << hard_port);
848		if (hc_irq_cause & ipending) {
849			writelfl(hc_irq_cause & ~ipending,
850				 hc_mmio + HC_IRQ_CAUSE_OFS);
851		}
852
853		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
854
855		/* clear FIS IRQ Cause */
856		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
857
858		mv_set_edma_ptrs(port_mmio, hpriv, pp);
859
860		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
861		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
862	}
863	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
864}
865
866/**
867 *      __mv_stop_dma - Disable eDMA engine
868 *      @ap: ATA channel to manipulate
869 *
870 *      Verify the local cache of the eDMA state is accurate with a
871 *      WARN_ON.
872 *
873 *      LOCKING:
874 *      Inherited from caller.
875 */
876static int __mv_stop_dma(struct ata_port *ap)
877{
878	void __iomem *port_mmio = mv_ap_base(ap);
879	struct mv_port_priv *pp	= ap->private_data;
880	u32 reg;
881	int i, err = 0;
882
883	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
884		/* Disable EDMA if active.   The disable bit auto clears.
885		 */
886		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
887		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
888	} else {
889		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
890	}
891
892	/* now properly wait for the eDMA to stop */
893	for (i = 1000; i > 0; i--) {
894		reg = readl(port_mmio + EDMA_CMD_OFS);
895		if (!(reg & EDMA_EN))
896			break;
897
898		udelay(100);
899	}
900
901	if (reg & EDMA_EN) {
902		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
903		err = -EIO;
904	}
905
906	return err;
907}
908
909static int mv_stop_dma(struct ata_port *ap)
910{
911	unsigned long flags;
912	int rc;
913
914	spin_lock_irqsave(&ap->host->lock, flags);
915	rc = __mv_stop_dma(ap);
916	spin_unlock_irqrestore(&ap->host->lock, flags);
917
918	return rc;
919}
920
921#ifdef ATA_DEBUG
922static void mv_dump_mem(void __iomem *start, unsigned bytes)
923{
924	int b, w;
925	for (b = 0; b < bytes; ) {
926		DPRINTK("%p: ", start + b);
927		for (w = 0; b < bytes && w < 4; w++) {
928			printk("%08x ", readl(start + b));
929			b += sizeof(u32);
930		}
931		printk("\n");
932	}
933}
934#endif
935
936static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
937{
938#ifdef ATA_DEBUG
939	int b, w;
940	u32 dw;
941	for (b = 0; b < bytes; ) {
942		DPRINTK("%02x: ", b);
943		for (w = 0; b < bytes && w < 4; w++) {
944			(void) pci_read_config_dword(pdev, b, &dw);
945			printk("%08x ", dw);
946			b += sizeof(u32);
947		}
948		printk("\n");
949	}
950#endif
951}
952static void mv_dump_all_regs(void __iomem *mmio_base, int port,
953			     struct pci_dev *pdev)
954{
955#ifdef ATA_DEBUG
956	void __iomem *hc_base = mv_hc_base(mmio_base,
957					   port >> MV_PORT_HC_SHIFT);
958	void __iomem *port_base;
959	int start_port, num_ports, p, start_hc, num_hcs, hc;
960
961	if (0 > port) {
962		start_hc = start_port = 0;
963		num_ports = 8;		/* should be benign for 4 port devs */
964		num_hcs = 2;
965	} else {
966		start_hc = port >> MV_PORT_HC_SHIFT;
967		start_port = port;
968		num_ports = num_hcs = 1;
969	}
970	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
971		num_ports > 1 ? num_ports - 1 : start_port);
972
973	if (NULL != pdev) {
974		DPRINTK("PCI config space regs:\n");
975		mv_dump_pci_cfg(pdev, 0x68);
976	}
977	DPRINTK("PCI regs:\n");
978	mv_dump_mem(mmio_base+0xc00, 0x3c);
979	mv_dump_mem(mmio_base+0xd00, 0x34);
980	mv_dump_mem(mmio_base+0xf00, 0x4);
981	mv_dump_mem(mmio_base+0x1d00, 0x6c);
982	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
983		hc_base = mv_hc_base(mmio_base, hc);
984		DPRINTK("HC regs (HC %i):\n", hc);
985		mv_dump_mem(hc_base, 0x1c);
986	}
987	for (p = start_port; p < start_port + num_ports; p++) {
988		port_base = mv_port_base(mmio_base, p);
989		DPRINTK("EDMA regs (port %i):\n", p);
990		mv_dump_mem(port_base, 0x54);
991		DPRINTK("SATA regs (port %i):\n", p);
992		mv_dump_mem(port_base+0x300, 0x60);
993	}
994#endif
995}
996
997static unsigned int mv_scr_offset(unsigned int sc_reg_in)
998{
999	unsigned int ofs;
1000
1001	switch (sc_reg_in) {
1002	case SCR_STATUS:
1003	case SCR_CONTROL:
1004	case SCR_ERROR:
1005		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1006		break;
1007	case SCR_ACTIVE:
1008		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
1009		break;
1010	default:
1011		ofs = 0xffffffffU;
1012		break;
1013	}
1014	return ofs;
1015}
1016
1017static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1018{
1019	unsigned int ofs = mv_scr_offset(sc_reg_in);
1020
1021	if (ofs != 0xffffffffU) {
1022		*val = readl(mv_ap_base(ap) + ofs);
1023		return 0;
1024	} else
1025		return -EINVAL;
1026}
1027
1028static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1029{
1030	unsigned int ofs = mv_scr_offset(sc_reg_in);
1031
1032	if (ofs != 0xffffffffU) {
1033		writelfl(val, mv_ap_base(ap) + ofs);
1034		return 0;
1035	} else
1036		return -EINVAL;
1037}
1038
1039static void mv6_dev_config(struct ata_device *adev)
1040{
1041	/*
1042	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1043	 * See mv_qc_prep() for more info.
1044	 */
1045	if (adev->flags & ATA_DFLAG_NCQ)
1046		if (adev->max_sectors > ATA_MAX_SECTORS)
1047			adev->max_sectors = ATA_MAX_SECTORS;
1048}
1049
1050static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1051			void __iomem *port_mmio, int want_ncq)
1052{
1053	u32 cfg;
1054
1055	/* set up non-NCQ EDMA configuration */
1056	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
1057
1058	if (IS_GEN_I(hpriv))
1059		cfg |= (1 << 8);	/* enab config burst size mask */
1060
1061	else if (IS_GEN_II(hpriv))
1062		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1063
1064	else if (IS_GEN_IIE(hpriv)) {
1065		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
1066		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
1067		cfg |= (1 << 18);	/* enab early completion */
1068		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
1069	}
1070
1071	if (want_ncq) {
1072		cfg |= EDMA_CFG_NCQ;
1073		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
1074	} else
1075		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1076
1077	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1078}
1079
1080static void mv_port_free_dma_mem(struct ata_port *ap)
1081{
1082	struct mv_host_priv *hpriv = ap->host->private_data;
1083	struct mv_port_priv *pp = ap->private_data;
1084	int tag;
1085
1086	if (pp->crqb) {
1087		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1088		pp->crqb = NULL;
1089	}
1090	if (pp->crpb) {
1091		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1092		pp->crpb = NULL;
1093	}
1094	/*
1095	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1096	 * For later hardware, we have one unique sg_tbl per NCQ tag.
1097	 */
1098	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1099		if (pp->sg_tbl[tag]) {
1100			if (tag == 0 || !IS_GEN_I(hpriv))
1101				dma_pool_free(hpriv->sg_tbl_pool,
1102					      pp->sg_tbl[tag],
1103					      pp->sg_tbl_dma[tag]);
1104			pp->sg_tbl[tag] = NULL;
1105		}
1106	}
1107}
1108
1109/**
1110 *      mv_port_start - Port specific init/start routine.
1111 *      @ap: ATA channel to manipulate
1112 *
1113 *      Allocate and point to DMA memory, init port private memory,
1114 *      zero indices.
1115 *
1116 *      LOCKING:
1117 *      Inherited from caller.
1118 */
1119static int mv_port_start(struct ata_port *ap)
1120{
1121	struct device *dev = ap->host->dev;
1122	struct mv_host_priv *hpriv = ap->host->private_data;
1123	struct mv_port_priv *pp;
1124	void __iomem *port_mmio = mv_ap_base(ap);
1125	unsigned long flags;
1126	int tag;
1127
1128	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1129	if (!pp)
1130		return -ENOMEM;
1131	ap->private_data = pp;
1132
1133	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1134	if (!pp->crqb)
1135		return -ENOMEM;
1136	memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1137
1138	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1139	if (!pp->crpb)
1140		goto out_port_free_dma_mem;
1141	memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1142
1143	/*
1144	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1145	 * For later hardware, we need one unique sg_tbl per NCQ tag.
1146	 */
1147	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1148		if (tag == 0 || !IS_GEN_I(hpriv)) {
1149			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1150					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1151			if (!pp->sg_tbl[tag])
1152				goto out_port_free_dma_mem;
1153		} else {
1154			pp->sg_tbl[tag]     = pp->sg_tbl[0];
1155			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1156		}
1157	}
1158
1159	spin_lock_irqsave(&ap->host->lock, flags);
1160
1161	mv_edma_cfg(pp, hpriv, port_mmio, 0);
1162	mv_set_edma_ptrs(port_mmio, hpriv, pp);
1163
1164	spin_unlock_irqrestore(&ap->host->lock, flags);
1165
1166	/* Don't turn on EDMA here...do it before DMA commands only.  Else
1167	 * we'll be unable to send non-data, PIO, etc. commands due to restricted access
1168	 * to shadow regs.
1169	 */
1170	return 0;
1171
1172out_port_free_dma_mem:
1173	mv_port_free_dma_mem(ap);
1174	return -ENOMEM;
1175}
1176
1177/**
1178 *      mv_port_stop - Port specific cleanup/stop routine.
1179 *      @ap: ATA channel to manipulate
1180 *
1181 *      Stop DMA, cleanup port memory.
1182 *
1183 *      LOCKING:
1184 *      This routine uses the host lock to protect the DMA stop.
1185 */
1186static void mv_port_stop(struct ata_port *ap)
1187{
1188	mv_stop_dma(ap);
1189	mv_port_free_dma_mem(ap);
1190}
1191
1192/**
1193 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1194 *      @qc: queued command whose SG list to source from
1195 *
1196 *      Populate the SG list and mark the last entry.
1197 *
1198 *      LOCKING:
1199 *      Inherited from caller.
1200 */
1201static void mv_fill_sg(struct ata_queued_cmd *qc)
1202{
1203	struct mv_port_priv *pp = qc->ap->private_data;
1204	struct scatterlist *sg;
1205	struct mv_sg *mv_sg, *last_sg = NULL;
1206	unsigned int si;
1207
1208	mv_sg = pp->sg_tbl[qc->tag];
1209	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1210		dma_addr_t addr = sg_dma_address(sg);
1211		u32 sg_len = sg_dma_len(sg);
1212
1213		while (sg_len) {
1214			u32 offset = addr & 0xffff;
1215			u32 len = sg_len;
1216
1217			if ((offset + sg_len > 0x10000))
1218				len = 0x10000 - offset;
1219
1220			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1221			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1222			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1223
1224			sg_len -= len;
1225			addr += len;
1226
1227			last_sg = mv_sg;
1228			mv_sg++;
1229		}
1230	}
1231
1232	if (likely(last_sg))
1233		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1234}
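/*
 * For example, a segment with DMA address 0x1234f000 and length 0x2000
 * has offset 0xf000, so the loop above emits one ePRD of length 0x1000
 * at 0x1234f000 and a second of length 0x1000 at 0x12350000; the second
 * (last) entry is then marked with EPRD_FLAG_END_OF_TBL.
 */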
1235
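/* Each CRQB ATA command word packed below holds one shadow register write:
 * bits 7:0 carry the register value, the shadow register address is placed
 * at bit 8 (CRQB_CMD_ADDR_SHIFT), CRQB_CMD_CS selects the control/status
 * field, and bit 15 (CRQB_CMD_LAST) marks the final word of the request.
 */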
1236static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1237{
1238	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1239		(last ? CRQB_CMD_LAST : 0);
1240	*cmdw = cpu_to_le16(tmp);
1241}
1242
1243/**
1244 *      mv_qc_prep - Host specific command preparation.
1245 *      @qc: queued command to prepare
1246 *
1247 *      This routine simply redirects to the general purpose routine
1248 *      if command is not DMA.  Else, it handles prep of the CRQB
1249 *      (command request block), does some sanity checking, and calls
1250 *      the SG load routine.
1251 *
1252 *      LOCKING:
1253 *      Inherited from caller.
1254 */
1255static void mv_qc_prep(struct ata_queued_cmd *qc)
1256{
1257	struct ata_port *ap = qc->ap;
1258	struct mv_port_priv *pp = ap->private_data;
1259	__le16 *cw;
1260	struct ata_taskfile *tf;
1261	u16 flags = 0;
1262	unsigned in_index;
1263
1264	if ((qc->tf.protocol != ATA_PROT_DMA) &&
1265	    (qc->tf.protocol != ATA_PROT_NCQ))
1266		return;
1267
1268	/* Fill in command request block
1269	 */
1270	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1271		flags |= CRQB_FLAG_READ;
1272	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1273	flags |= qc->tag << CRQB_TAG_SHIFT;
1274
1275	/* get current queue index from software */
1276	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1277
1278	pp->crqb[in_index].sg_addr =
1279		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1280	pp->crqb[in_index].sg_addr_hi =
1281		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1282	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1283
1284	cw = &pp->crqb[in_index].ata_cmd[0];
1285	tf = &qc->tf;
1286
1287	/* Sadly, the CRQB cannot accommodate all registers--there are
1288	 * only 11 command words...so we must pick and choose required
1289	 * registers based on the command.  So, we drop feature and
1290	 * hob_feature for [RW] DMA commands, but they are needed for
1291	 * NCQ.  NCQ will drop hob_nsect.
1292	 */
1293	switch (tf->command) {
1294	case ATA_CMD_READ:
1295	case ATA_CMD_READ_EXT:
1296	case ATA_CMD_WRITE:
1297	case ATA_CMD_WRITE_EXT:
1298	case ATA_CMD_WRITE_FUA_EXT:
1299		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1300		break;
1301	case ATA_CMD_FPDMA_READ:
1302	case ATA_CMD_FPDMA_WRITE:
1303		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1304		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1305		break;
1306	default:
1307		/* The only other commands EDMA supports in non-queued and
1308		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1309		 * of which are defined/used by Linux.  If we get here, this
1310		 * driver needs work.
1311		 *
1312		 * FIXME: modify libata to give qc_prep a return value and
1313		 * return error here.
1314		 */
1315		BUG_ON(tf->command);
1316		break;
1317	}
1318	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1319	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1320	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1321	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1322	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1323	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1324	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1325	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1326	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
1327
1328	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1329		return;
1330	mv_fill_sg(qc);
1331}
1332
1333/**
1334 *      mv_qc_prep_iie - Host specific command preparation.
1335 *      @qc: queued command to prepare
1336 *
1337 *      This routine simply redirects to the general purpose routine
1338 *      if command is not DMA.  Else, it handles prep of the CRQB
1339 *      (command request block), does some sanity checking, and calls
1340 *      the SG load routine.
1341 *
1342 *      LOCKING:
1343 *      Inherited from caller.
1344 */
1345static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1346{
1347	struct ata_port *ap = qc->ap;
1348	struct mv_port_priv *pp = ap->private_data;
1349	struct mv_crqb_iie *crqb;
1350	struct ata_taskfile *tf;
1351	unsigned in_index;
1352	u32 flags = 0;
1353
1354	if ((qc->tf.protocol != ATA_PROT_DMA) &&
1355	    (qc->tf.protocol != ATA_PROT_NCQ))
1356		return;
1357
1358	/* Fill in Gen IIE command request block
1359	 */
1360	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1361		flags |= CRQB_FLAG_READ;
1362
1363	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1364	flags |= qc->tag << CRQB_TAG_SHIFT;
1365	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1366
1367	/* get current queue index from software */
1368	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1369
1370	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1371	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1372	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1373	crqb->flags = cpu_to_le32(flags);
1374
1375	tf = &qc->tf;
1376	crqb->ata_cmd[0] = cpu_to_le32(
1377			(tf->command << 16) |
1378			(tf->feature << 24)
1379		);
1380	crqb->ata_cmd[1] = cpu_to_le32(
1381			(tf->lbal << 0) |
1382			(tf->lbam << 8) |
1383			(tf->lbah << 16) |
1384			(tf->device << 24)
1385		);
1386	crqb->ata_cmd[2] = cpu_to_le32(
1387			(tf->hob_lbal << 0) |
1388			(tf->hob_lbam << 8) |
1389			(tf->hob_lbah << 16) |
1390			(tf->hob_feature << 24)
1391		);
1392	crqb->ata_cmd[3] = cpu_to_le32(
1393			(tf->nsect << 0) |
1394			(tf->hob_nsect << 8)
1395		);
1396
1397	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1398		return;
1399	mv_fill_sg(qc);
1400}
1401
1402/**
1403 *      mv_qc_issue - Initiate a command to the host
1404 *      @qc: queued command to start
1405 *
1406 *      This routine simply redirects to the general purpose routine
1407 *      if command is not DMA.  Else, it sanity checks our local
1408 *      caches of the request producer/consumer indices then enables
1409 *      DMA and bumps the request producer index.
1410 *
1411 *      LOCKING:
1412 *      Inherited from caller.
1413 */
1414static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1415{
1416	struct ata_port *ap = qc->ap;
1417	void __iomem *port_mmio = mv_ap_base(ap);
1418	struct mv_port_priv *pp = ap->private_data;
1419	u32 in_index;
1420
1421	if ((qc->tf.protocol != ATA_PROT_DMA) &&
1422	    (qc->tf.protocol != ATA_PROT_NCQ)) {
1423		/* We're about to send a non-EDMA capable command to the
1424		 * port.  Turn off EDMA so there won't be problems accessing
1425		 * the shadow block and other registers.
1426		 */
1427		__mv_stop_dma(ap);
1428		return ata_qc_issue_prot(qc);
1429	}
1430
1431	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1432
1433	pp->req_idx++;
1434
1435	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1436
1437	/* and write the request in pointer to kick the EDMA to life */
1438	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1439		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1440
1441	return 0;
1442}
1443
1444/**
1445 *      mv_err_intr - Handle error interrupts on the port
1446 *      @ap: ATA channel to manipulate
1447 *      @qc: affected queued command, or NULL if none
1448 *
1449 *      In most cases, just clear the interrupt and move on.  However,
1450 *      some cases require an eDMA reset, which is done right before
1451 *      the COMRESET in mv_phy_reset().  The SERR case requires a
1452 *      clear of pending errors in the SATA SERROR register.  Finally,
1453 *      if the port disabled DMA, update our cached copy to match.
1454 *
1455 *      LOCKING:
1456 *      Inherited from caller.
1457 */
1458static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1459{
1460	void __iomem *port_mmio = mv_ap_base(ap);
1461	u32 edma_err_cause, eh_freeze_mask, serr = 0;
1462	struct mv_port_priv *pp = ap->private_data;
1463	struct mv_host_priv *hpriv = ap->host->private_data;
1464	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1465	unsigned int action = 0, err_mask = 0;
1466	struct ata_eh_info *ehi = &ap->link.eh_info;
1467
1468	ata_ehi_clear_desc(ehi);
1469
1470	if (!edma_enabled) {
1471		/* just a guess: do we need to do this? should we
1472		 * expand this, and do it in all cases?
1473		 */
1474		sata_scr_read(&ap->link, SCR_ERROR, &serr);
1475		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1476	}
1477
1478	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1479
1480	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1481
1482	/*
1483	 * all generations share these EDMA error cause bits
1484	 */
1485
1486	if (edma_err_cause & EDMA_ERR_DEV)
1487		err_mask |= AC_ERR_DEV;
1488	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1489			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1490			EDMA_ERR_INTRL_PAR)) {
1491		err_mask |= AC_ERR_ATA_BUS;
1492		action |= ATA_EH_RESET;
1493		ata_ehi_push_desc(ehi, "parity error");
1494	}
1495	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1496		ata_ehi_hotplugged(ehi);
1497		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1498			"dev disconnect" : "dev connect");
1499		action |= ATA_EH_RESET;
1500	}
1501
1502	if (IS_GEN_I(hpriv)) {
1503		eh_freeze_mask = EDMA_EH_FREEZE_5;
1504
1505		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1506			pp = ap->private_data;
1507			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1508			ata_ehi_push_desc(ehi, "EDMA self-disable");
1509		}
1510	} else {
1511		eh_freeze_mask = EDMA_EH_FREEZE;
1512
1513		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1514			pp = ap->private_data;
1515			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1516			ata_ehi_push_desc(ehi, "EDMA self-disable");
1517		}
1518
1519		if (edma_err_cause & EDMA_ERR_SERR) {
1520			sata_scr_read(&ap->link, SCR_ERROR, &serr);
1521			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1522			err_mask = AC_ERR_ATA_BUS;
1523			action |= ATA_EH_RESET;
1524		}
1525	}
1526
1527	/* Clear EDMA now that SERR cleanup done */
1528	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1529
1530	if (!err_mask) {
1531		err_mask = AC_ERR_OTHER;
1532		action |= ATA_EH_RESET;
1533	}
1534
1535	ehi->serror |= serr;
1536	ehi->action |= action;
1537
1538	if (qc)
1539		qc->err_mask |= err_mask;
1540	else
1541		ehi->err_mask |= err_mask;
1542
1543	if (edma_err_cause & eh_freeze_mask)
1544		ata_port_freeze(ap);
1545	else
1546		ata_port_abort(ap);
1547}
1548
1549static void mv_intr_pio(struct ata_port *ap)
1550{
1551	struct ata_queued_cmd *qc;
1552	u8 ata_status;
1553
1554	/* ignore spurious intr if drive still BUSY */
1555	ata_status = readb(ap->ioaddr.status_addr);
1556	if (unlikely(ata_status & ATA_BUSY))
1557		return;
1558
1559	/* get active ATA command */
1560	qc = ata_qc_from_tag(ap, ap->link.active_tag);
1561	if (unlikely(!qc))			/* no active tag */
1562		return;
1563	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
1564		return;
1565
1566	/* and finally, complete the ATA command */
1567	qc->err_mask |= ac_err_mask(ata_status);
1568	ata_qc_complete(qc);
1569}
1570
1571static void mv_intr_edma(struct ata_port *ap)
1572{
1573	void __iomem *port_mmio = mv_ap_base(ap);
1574	struct mv_host_priv *hpriv = ap->host->private_data;
1575	struct mv_port_priv *pp = ap->private_data;
1576	struct ata_queued_cmd *qc;
1577	u32 out_index, in_index;
1578	bool work_done = false;
1579
1580	/* get h/w response queue pointer */
1581	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1582			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1583
1584	while (1) {
1585		u16 status;
1586		unsigned int tag;
1587
1588		/* get s/w response queue last-read pointer, and compare */
1589		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1590		if (in_index == out_index)
1591			break;
1592
1593		/* 50xx: get active ATA command */
1594		if (IS_GEN_I(hpriv))
1595			tag = ap->link.active_tag;
1596
1597		/* Gen II/IIE: get active ATA command via tag, to enable
1598		 * support for queueing.  this works transparently for
1599		 * queued and non-queued modes.
1600		 */
1601		else
1602			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1603
1604		qc = ata_qc_from_tag(ap, tag);
1605
1606		/* For non-NCQ mode, the lower 8 bits of status
1607		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1608		 * which should be zero if all went well.
1609		 */
1610		status = le16_to_cpu(pp->crpb[out_index].flags);
1611		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1612			mv_err_intr(ap, qc);
1613			return;
1614		}
1615
1616		/* and finally, complete the ATA command */
1617		if (qc) {
1618			qc->err_mask |=
1619				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1620			ata_qc_complete(qc);
1621		}
1622
1623		/* advance software response queue pointer, to
1624		 * indicate (after the loop completes) to hardware
1625		 * that we have consumed a response queue entry.
1626		 */
1627		work_done = true;
1628		pp->resp_idx++;
1629	}
1630
1631	if (work_done)
1632		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1633			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1634			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1635}
1636
1637/**
1638 *      mv_host_intr - Handle all interrupts on the given host controller
1639 *      @host: host specific structure
1640 *      @relevant: port error bits relevant to this host controller
1641 *      @hc: which host controller we're to look at
1642 *
1643 *      Read and then write-clear the HC interrupt status, then walk each
1644 *      port connected to the HC and see if it needs servicing.  Port
1645 *      success ints are reported in the HC interrupt status reg, the
1646 *      port error ints are reported in the higher level main
1647 *      interrupt status register and thus are passed in via the
1648 *      'relevant' argument.
1649 *
1650 *      LOCKING:
1651 *      Inherited from caller.
1652 */
1653static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1654{
1655	struct mv_host_priv *hpriv = host->private_data;
1656	void __iomem *mmio = hpriv->base;
1657	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1658	u32 hc_irq_cause;
1659	int port, port0, last_port;
1660
1661	if (hc == 0)
1662		port0 = 0;
1663	else
1664		port0 = MV_PORTS_PER_HC;
1665
1666	if (HAS_PCI(host))
1667		last_port = port0 + MV_PORTS_PER_HC;
1668	else
1669		last_port = port0 + hpriv->n_ports;
1670	/* we'll need the HC success int register in most cases */
1671	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1672	if (!hc_irq_cause)
1673		return;
1674
1675	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1676
1677	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1678		hc, relevant, hc_irq_cause);
1679
1680	for (port = port0; port < last_port; port++) {
1681		struct ata_port *ap = host->ports[port];
1682		struct mv_port_priv *pp;
1683		int have_err_bits, hard_port, shift;
1684
1685		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1686			continue;
1687
1688		pp = ap->private_data;
1689
1690		shift = port << 1;		/* (port * 2) */
1691		if (port >= MV_PORTS_PER_HC) {
1692			shift++;	/* skip bit 8 in the HC Main IRQ reg */
1693		}
1694		have_err_bits = ((PORT0_ERR << shift) & relevant);
1695
1696		if (unlikely(have_err_bits)) {
1697			struct ata_queued_cmd *qc;
1698
1699			qc = ata_qc_from_tag(ap, ap->link.active_tag);
1700			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1701				continue;
1702
1703			mv_err_intr(ap, qc);
1704			continue;
1705		}
1706
1707		hard_port = mv_hardport_from_port(port); /* range 0..3 */
1708
1709		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1710			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1711				mv_intr_edma(ap);
1712		} else {
1713			if ((DEV_IRQ << hard_port) & hc_irq_cause)
1714				mv_intr_pio(ap);
1715		}
1716	}
1717	VPRINTK("EXIT\n");
1718}
1719
1720static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1721{
1722	struct mv_host_priv *hpriv = host->private_data;
1723	struct ata_port *ap;
1724	struct ata_queued_cmd *qc;
1725	struct ata_eh_info *ehi;
1726	unsigned int i, err_mask, printed = 0;
1727	u32 err_cause;
1728
1729	err_cause = readl(mmio + hpriv->irq_cause_ofs);
1730
1731	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1732		   err_cause);
1733
1734	DPRINTK("All regs @ PCI error\n");
1735	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1736
1737	writelfl(0, mmio + hpriv->irq_cause_ofs);
1738
1739	for (i = 0; i < host->n_ports; i++) {
1740		ap = host->ports[i];
1741		if (!ata_link_offline(&ap->link)) {
1742			ehi = &ap->link.eh_info;
1743			ata_ehi_clear_desc(ehi);
1744			if (!printed++)
1745				ata_ehi_push_desc(ehi,
1746					"PCI err cause 0x%08x", err_cause);
1747			err_mask = AC_ERR_HOST_BUS;
1748			ehi->action = ATA_EH_RESET;
1749			qc = ata_qc_from_tag(ap, ap->link.active_tag);
1750			if (qc)
1751				qc->err_mask |= err_mask;
1752			else
1753				ehi->err_mask |= err_mask;
1754
1755			ata_port_freeze(ap);
1756		}
1757	}
1758}
1759
1760/**
1761 *      mv_interrupt - Main interrupt event handler
1762 *      @irq: unused
1763 *      @dev_instance: private data; in this case the host structure
1764 *
1765 *      Read the read only register to determine if any host
1766 *      controllers have pending interrupts.  If so, call lower level
1767 *      routine to handle.  Also check for PCI errors which are only
1768 *      reported here.
1769 *
1770 *      LOCKING:
1771 *      This routine holds the host lock while processing pending
1772 *      interrupts.
1773 */
1774static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1775{
1776	struct ata_host *host = dev_instance;
1777	struct mv_host_priv *hpriv = host->private_data;
1778	unsigned int hc, handled = 0, n_hcs;
1779	void __iomem *mmio = hpriv->base;
1780	u32 irq_stat, irq_mask;
1781
1782	spin_lock(&host->lock);
1783
1784	irq_stat = readl(hpriv->main_cause_reg_addr);
1785	irq_mask = readl(hpriv->main_mask_reg_addr);
1786
1787	/* check the cases where we either have nothing pending or have read
1788	 * a bogus register value which can indicate HW removal or PCI fault
1789	 */
1790	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1791		goto out_unlock;
1792
1793	n_hcs = mv_get_hc_count(host->ports[0]->flags);
1794
1795	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1796		mv_pci_error(host, mmio);
1797		handled = 1;
1798		goto out_unlock;	/* skip all other HC irq handling */
1799	}
1800
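	/* The main cause register is carved into one HC0_IRQ_PEND-sized
	 * field per host controller, HC_SHIFT bits apart; grab each HC's
	 * slice and let mv_host_intr() sort out the individual ports.
	 */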
1801	for (hc = 0; hc < n_hcs; hc++) {
1802		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1803		if (relevant) {
1804			mv_host_intr(host, relevant, hc);
1805			handled = 1;
1806		}
1807	}
1808
1809out_unlock:
1810	spin_unlock(&host->lock);
1811
1812	return IRQ_RETVAL(handled);
1813}
1814
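/* On the 50xx parts the per-port PHY registers presumably sit inside the
 * host controller's register block as one 0x100-byte window per hard port,
 * starting at offset 0x100 -- hence the (hardport + 1) * 0x100 math below.
 */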
1815static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1816{
1817	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1818	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1819
1820	return hc_mmio + ofs;
1821}
1822
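/* libata's SCR_STATUS/SCR_ERROR/SCR_CONTROL indices map straight onto
 * consecutive 32-bit registers here (e.g. SCR_CONTROL == 2 lands at offset
 * 0x8); anything else is rejected via the 0xffffffffU sentinel.
 */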
1823static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1824{
1825	unsigned int ofs;
1826
1827	switch (sc_reg_in) {
1828	case SCR_STATUS:
1829	case SCR_ERROR:
1830	case SCR_CONTROL:
1831		ofs = sc_reg_in * sizeof(u32);
1832		break;
1833	default:
1834		ofs = 0xffffffffU;
1835		break;
1836	}
1837	return ofs;
1838}
1839
1840static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1841{
1842	struct mv_host_priv *hpriv = ap->host->private_data;
1843	void __iomem *mmio = hpriv->base;
1844	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1845	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1846
1847	if (ofs != 0xffffffffU) {
1848		*val = readl(addr + ofs);
1849		return 0;
1850	} else
1851		return -EINVAL;
1852}
1853
1854static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1855{
1856	struct mv_host_priv *hpriv = ap->host->private_data;
1857	void __iomem *mmio = hpriv->base;
1858	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1859	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1860
1861	if (ofs != 0xffffffffU) {
1862		writelfl(val, addr + ofs);
1863		return 0;
1864	} else
1865		return -EINVAL;
1866}
1867
1868static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1869{
1870	struct pci_dev *pdev = to_pci_dev(host->dev);
1871	int early_5080;
1872
1873	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1874
1875	if (!early_5080) {
1876		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1877		tmp |= (1 << 0);
1878		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1879	}
1880
1881	mv_reset_pci_bus(host, mmio);
1882}
1883
1884static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1885{
1886	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1887}
1888
1889static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1890			   void __iomem *mmio)
1891{
1892	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1893	u32 tmp;
1894
1895	tmp = readl(phy_mmio + MV5_PHY_MODE);
1896
1897	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
1898	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
1899}
1900
1901static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1902{
1903	u32 tmp;
1904
1905	writel(0, mmio + MV_GPIO_PORT_CTL);
1906
1907	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1908
1909	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1910	tmp |= ~(1 << 0);
1911	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1912}
1913
1914static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1915			   unsigned int port)
1916{
1917	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1918	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1919	u32 tmp;
1920	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1921
1922	if (fix_apm_sq) {
1923		tmp = readl(phy_mmio + MV5_LT_MODE);
1924		tmp |= (1 << 19);
1925		writel(tmp, phy_mmio + MV5_LT_MODE);
1926
1927		tmp = readl(phy_mmio + MV5_PHY_CTL);
1928		tmp &= ~0x3;
1929		tmp |= 0x1;
1930		writel(tmp, phy_mmio + MV5_PHY_CTL);
1931	}
1932
1933	tmp = readl(phy_mmio + MV5_PHY_MODE);
1934	tmp &= ~mask;
1935	tmp |= hpriv->signal[port].pre;
1936	tmp |= hpriv->signal[port].amps;
1937	writel(tmp, phy_mmio + MV5_PHY_MODE);
1938}
1939
1940
1941#undef ZERO
1942#define ZERO(reg) writel(0, port_mmio + (reg))
1943static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1944			     unsigned int port)
1945{
1946	void __iomem *port_mmio = mv_port_base(mmio, port);
1947
1948	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1949
1950	mv_channel_reset(hpriv, mmio, port);
1951
1952	ZERO(0x028);	/* command */
1953	writel(0x11f, port_mmio + EDMA_CFG_OFS);
1954	ZERO(0x004);	/* timer */
1955	ZERO(0x008);	/* irq err cause */
1956	ZERO(0x00c);	/* irq err mask */
1957	ZERO(0x010);	/* rq bah */
1958	ZERO(0x014);	/* rq inp */
1959	ZERO(0x018);	/* rq outp */
1960	ZERO(0x01c);	/* respq bah */
1961	ZERO(0x024);	/* respq outp */
1962	ZERO(0x020);	/* respq inp */
1963	ZERO(0x02c);	/* test control */
1964	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1965}
1966#undef ZERO
1967
1968#define ZERO(reg) writel(0, hc_mmio + (reg))
1969static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1970			unsigned int hc)
1971{
1972	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1973	u32 tmp;
1974
1975	ZERO(0x00c);
1976	ZERO(0x010);
1977	ZERO(0x014);
1978	ZERO(0x018);
1979
1980	tmp = readl(hc_mmio + 0x20);
1981	tmp &= 0x1c1c1c1c;
1982	tmp |= 0x03030303;
1983	writel(tmp, hc_mmio + 0x20);
1984}
1985#undef ZERO
1986
1987static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1988			unsigned int n_hc)
1989{
1990	unsigned int hc, port;
1991
1992	for (hc = 0; hc < n_hc; hc++) {
1993		for (port = 0; port < MV_PORTS_PER_HC; port++)
1994			mv5_reset_hc_port(hpriv, mmio,
1995					  (hc * MV_PORTS_PER_HC) + port);
1996
1997		mv5_reset_one_hc(hpriv, mmio, hc);
1998	}
1999
2000	return 0;
2001}
2002
2003#undef ZERO
2004#define ZERO(reg) writel(0, mmio + (reg))
2005static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2006{
2007	struct mv_host_priv *hpriv = host->private_data;
2008	u32 tmp;
2009
2010	tmp = readl(mmio + MV_PCI_MODE);
2011	tmp &= 0xff00ffff;
2012	writel(tmp, mmio + MV_PCI_MODE);
2013
2014	ZERO(MV_PCI_DISC_TIMER);
2015	ZERO(MV_PCI_MSI_TRIGGER);
2016	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2017	ZERO(HC_MAIN_IRQ_MASK_OFS);
2018	ZERO(MV_PCI_SERR_MASK);
2019	ZERO(hpriv->irq_cause_ofs);
2020	ZERO(hpriv->irq_mask_ofs);
2021	ZERO(MV_PCI_ERR_LOW_ADDRESS);
2022	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2023	ZERO(MV_PCI_ERR_ATTRIBUTE);
2024	ZERO(MV_PCI_ERR_COMMAND);
2025}
2026#undef ZERO
2027
2028static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2029{
2030	u32 tmp;
2031
2032	mv5_reset_flash(hpriv, mmio);
2033
2034	tmp = readl(mmio + MV_GPIO_PORT_CTL);
2035	tmp &= 0x3;
2036	tmp |= (1 << 5) | (1 << 6);
2037	writel(tmp, mmio + MV_GPIO_PORT_CTL);
2038}
2039
2040/**
2041 *      mv6_reset_hc - Perform the 6xxx global soft reset
2042 *      @mmio: base address of the HBA
2043 *
2044 *      This routine only applies to 6xxx parts.
2045 *
2046 *      LOCKING:
2047 *      Inherited from caller.
2048 */
2049static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2050			unsigned int n_hc)
2051{
2052	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2053	int i, rc = 0;
2054	u32 t;
2055
2056	/* Following procedure defined in PCI "main command and status
2057	 * register" table.
2058	 */
2059	t = readl(reg);
2060	writel(t | STOP_PCI_MASTER, reg);
2061
2062	for (i = 0; i < 1000; i++) {
2063		udelay(1);
2064		t = readl(reg);
2065		if (PCI_MASTER_EMPTY & t)
2066			break;
2067	}
2068	if (!(PCI_MASTER_EMPTY & t)) {
2069		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2070		rc = 1;
2071		goto done;
2072	}
2073
2074	/* set reset */
2075	i = 5;
2076	do {
2077		writel(t | GLOB_SFT_RST, reg);
2078		t = readl(reg);
2079		udelay(1);
2080	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
2081
2082	if (!(GLOB_SFT_RST & t)) {
2083		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2084		rc = 1;
2085		goto done;
2086	}
2087
2088	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
2089	i = 5;
2090	do {
2091		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2092		t = readl(reg);
2093		udelay(1);
2094	} while ((GLOB_SFT_RST & t) && (i-- > 0));
2095
2096	if (GLOB_SFT_RST & t) {
2097		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2098		rc = 1;
2099	}
2100done:
2101	return rc;
2102}
2103
2104static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2105			   void __iomem *mmio)
2106{
2107	void __iomem *port_mmio;
2108	u32 tmp;
2109
2110	tmp = readl(mmio + MV_RESET_CFG);
2111	if ((tmp & (1 << 0)) == 0) {
2112		hpriv->signal[idx].amps = 0x7 << 8;
2113		hpriv->signal[idx].pre = 0x1 << 5;
2114		return;
2115	}
2116
2117	port_mmio = mv_port_base(mmio, idx);
2118	tmp = readl(port_mmio + PHY_MODE2);
2119
2120	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
2121	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
2122}
2123
2124static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2125{
2126	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2127}
2128
2129static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2130			   unsigned int port)
2131{
2132	void __iomem *port_mmio = mv_port_base(mmio, port);
2133
2134	u32 hp_flags = hpriv->hp_flags;
2135	int fix_phy_mode2 =
2136		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2137	int fix_phy_mode4 =
2138		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2139	u32 m2, tmp;
2140
2141	if (fix_phy_mode2) {
2142		m2 = readl(port_mmio + PHY_MODE2);
2143		m2 &= ~(1 << 16);
2144		m2 |= (1 << 31);
2145		writel(m2, port_mmio + PHY_MODE2);
2146
2147		udelay(200);
2148
2149		m2 = readl(port_mmio + PHY_MODE2);
2150		m2 &= ~((1 << 16) | (1 << 31));
2151		writel(m2, port_mmio + PHY_MODE2);
2152
2153		udelay(200);
2154	}
2155
2156	/* who knows what this magic does */
2157	tmp = readl(port_mmio + PHY_MODE3);
2158	tmp &= ~0x7F800000;
2159	tmp |= 0x2A800000;
2160	writel(tmp, port_mmio + PHY_MODE3);
2161
2162	if (fix_phy_mode4) {
2163		u32 m4;
2164
2165		m4 = readl(port_mmio + PHY_MODE4);
2166
2167		if (hp_flags & MV_HP_ERRATA_60X1B2)
2168			tmp = readl(port_mmio + 0x310);
2169
2170		m4 = (m4 & ~(1 << 1)) | (1 << 0);
2171
2172		writel(m4, port_mmio + PHY_MODE4);
2173
2174		if (hp_flags & MV_HP_ERRATA_60X1B2)
2175			writel(tmp, port_mmio + 0x310);
2176	}
2177
2178	/* Revert values of pre-emphasis and signal amps to the saved ones */
2179	m2 = readl(port_mmio + PHY_MODE2);
2180
2181	m2 &= ~MV_M2_PREAMP_MASK;
2182	m2 |= hpriv->signal[port].amps;
2183	m2 |= hpriv->signal[port].pre;
2184	m2 &= ~(1 << 16);
2185
2186	/* according to mvSata 3.6.1, some IIE values are fixed */
2187	if (IS_GEN_IIE(hpriv)) {
2188		m2 &= ~0xC30FF01F;
2189		m2 |= 0x0000900F;
2190	}
2191
2192	writel(m2, port_mmio + PHY_MODE2);
2193}
2194
2195/* TODO: use the generic LED interface to configure the SATA Presence
2196 * and Activity LEDs on the board */
2197static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2198				      void __iomem *mmio)
2199{
2200	return;
2201}
2202
2203static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2204			   void __iomem *mmio)
2205{
2206	void __iomem *port_mmio;
2207	u32 tmp;
2208
2209	port_mmio = mv_port_base(mmio, idx);
2210	tmp = readl(port_mmio + PHY_MODE2);
2211
2212	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
2213	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
2214}
2215
2216#undef ZERO
2217#define ZERO(reg) writel(0, port_mmio + (reg))
2218static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2219					void __iomem *mmio, unsigned int port)
2220{
2221	void __iomem *port_mmio = mv_port_base(mmio, port);
2222
2223	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
2224
2225	mv_channel_reset(hpriv, mmio, port);
2226
2227	ZERO(0x028);		/* command */
2228	writel(0x101f, port_mmio + EDMA_CFG_OFS);
2229	ZERO(0x004);		/* timer */
2230	ZERO(0x008);		/* irq err cause */
2231	ZERO(0x00c);		/* irq err mask */
2232	ZERO(0x010);		/* rq bah */
2233	ZERO(0x014);		/* rq inp */
2234	ZERO(0x018);		/* rq outp */
2235	ZERO(0x01c);		/* respq bah */
2236	ZERO(0x024);		/* respq outp */
2237	ZERO(0x020);		/* respq inp */
2238	ZERO(0x02c);		/* test control */
2239	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2240}
2241
2242#undef ZERO
2243
2244#define ZERO(reg) writel(0, hc_mmio + (reg))
2245static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2246				       void __iomem *mmio)
2247{
2248	void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2249
2250	ZERO(0x00c);
2251	ZERO(0x010);
2252	ZERO(0x014);
2254}
2255
2256#undef ZERO
2257
2258static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2259				  void __iomem *mmio, unsigned int n_hc)
2260{
2261	unsigned int port;
2262
2263	for (port = 0; port < hpriv->n_ports; port++)
2264		mv_soc_reset_hc_port(hpriv, mmio, port);
2265
2266	mv_soc_reset_one_hc(hpriv, mmio);
2267
2268	return 0;
2269}
2270
2271static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2272				      void __iomem *mmio)
2273{
2274	return;
2275}
2276
2277static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2278{
2279	return;
2280}
2281
2282static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2283			     unsigned int port_no)
2284{
2285	void __iomem *port_mmio = mv_port_base(mmio, port_no);
2286
2287	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2288
2289	if (IS_GEN_II(hpriv)) {
2290		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2291		ifctl |= (1 << 7);		/* enable gen2i speed */
2292		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2293		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2294	}
2295
2296	udelay(25);		/* allow reset propagation */
2297
2298	/* Spec never mentions clearing the bit.  Marvell's driver does
2299	 * clear the bit, however.
2300	 */
2301	writelfl(0, port_mmio + EDMA_CMD_OFS);
2302
2303	hpriv->ops->phy_errata(hpriv, mmio, port_no);
2304
2305	if (IS_GEN_I(hpriv))
2306		mdelay(1);
2307}
2308
2309/**
2310 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
2311 *      @ap: ATA channel to manipulate
2312 *
2313 *      Part of this is taken from __sata_phy_reset.  Note that the
2314 *      routine msleep()s while polling, so it must only be called
2315 *      from a context that is allowed to sleep (the EH reset path).
2316 *
2317 *      LOCKING:
2318 *      Inherited from caller.
2319 */
2320static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2321			 unsigned long deadline)
2322{
2323	struct mv_port_priv *pp	= ap->private_data;
2324	struct mv_host_priv *hpriv = ap->host->private_data;
2325	void __iomem *port_mmio = mv_ap_base(ap);
2326	int retry = 5;
2327	u32 sstatus;
2328
2329	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2330
2331#ifdef DEBUG
2332	{
2333		u32 sstatus, serror, scontrol;
2334
2335		mv_scr_read(ap, SCR_STATUS, &sstatus);
2336		mv_scr_read(ap, SCR_ERROR, &serror);
2337		mv_scr_read(ap, SCR_CONTROL, &scontrol);
2338		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2339			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
2340	}
2341#endif
2342
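	/* Writing SControl DET=1 (0x301) forces a COMRESET on the wire and
	 * DET=0 (0x300) releases it; we then poll SStatus until its DET
	 * field reads 0 (nothing attached) or 3 (device present and PHY
	 * communication established).
	 */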
2343	/* Issue COMRESET via SControl */
2344comreset_retry:
2345	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2346	msleep(1);
2347
2348	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2349	msleep(20);
2350
2351	do {
2352		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2353		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2354			break;
2355
2356		msleep(1);
2357	} while (time_before(jiffies, deadline));
2358
2359	/* work around errata */
2360	if (IS_GEN_II(hpriv) &&
2361	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2362	    (retry-- > 0))
2363		goto comreset_retry;
2364
2365#ifdef DEBUG
2366	{
2367		u32 sstatus, serror, scontrol;
2368
2369		mv_scr_read(ap, SCR_STATUS, &sstatus);
2370		mv_scr_read(ap, SCR_ERROR, &serror);
2371		mv_scr_read(ap, SCR_CONTROL, &scontrol);
2372		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2373			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
2374	}
2375#endif
2376
2377	if (ata_link_offline(&ap->link)) {
2378		*class = ATA_DEV_NONE;
2379		return;
2380	}
2381
2382	/* Even after SStatus reflects that the device is ready, it seems
2383	 * to take a while for the link to be fully established (and thus
2384	 * the Status register to stop reading 0x80/0x7F), so poll a bit
2385	 * for that here.
2386	 */
2387	retry = 20;
2388	while (1) {
2389		u8 drv_stat = ata_check_status(ap);
2390		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2391			break;
2392		msleep(500);
2393		if (retry-- <= 0)
2394			break;
2395		if (time_after(jiffies, deadline))
2396			break;
2397	}
2398
2399	/* FIXME: if we passed the deadline, the following
2400	 * code probably produces an invalid result
2401	 */
2402
2403	/* finally, read device signature from TF registers */
2404	*class = ata_dev_try_classify(ap->link.device, 1, NULL);
2405
2406	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2407
2408	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2409
2410	VPRINTK("EXIT\n");
2411}
2412
2413static int mv_prereset(struct ata_link *link, unsigned long deadline)
2414{
2415	struct ata_port *ap = link->ap;
2416	struct mv_port_priv *pp	= ap->private_data;
2417
2418	mv_stop_dma(ap);
2419
2420	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET))
2421		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2422
2423	return 0;
2424}
2425
2426static int mv_hardreset(struct ata_link *link, unsigned int *class,
2427			unsigned long deadline)
2428{
2429	struct ata_port *ap = link->ap;
2430	struct mv_host_priv *hpriv = ap->host->private_data;
2431	void __iomem *mmio = hpriv->base;
2432
2433	mv_stop_dma(ap);
2434
2435	mv_channel_reset(hpriv, mmio, ap->port_no);
2436
2437	mv_phy_reset(ap, class, deadline);
2438
2439	return 0;
2440}
2441
2442static void mv_postreset(struct ata_link *link, unsigned int *classes)
2443{
2444	struct ata_port *ap = link->ap;
2445	u32 serr;
2446
2447	/* print link status */
2448	sata_print_link_status(link);
2449
2450	/* clear SError */
2451	sata_scr_read(link, SCR_ERROR, &serr);
2452	sata_scr_write_flush(link, SCR_ERROR, serr);
2453
2454	/* bail out if no device is present */
2455	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2456		DPRINTK("EXIT, no device\n");
2457		return;
2458	}
2459
2460	/* set up device control */
2461	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2462}
2463
2464static void mv_error_handler(struct ata_port *ap)
2465{
2466	ata_do_eh(ap, mv_prereset, ata_std_softreset,
2467		  mv_hardreset, mv_postreset);
2468}
2469
2470static void mv_eh_freeze(struct ata_port *ap)
2471{
2472	struct mv_host_priv *hpriv = ap->host->private_data;
2473	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2474	u32 tmp, mask;
2475	unsigned int shift;
2476
2477	/* FIXME: handle coalescing completion events properly */
2478
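	/* Same layout as in mv_host_intr(): two bits (err + done) per port
	 * in the main mask register, with ports on the second HC shifted up
	 * by one to skip HC0's bit 8.
	 */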
2479	shift = ap->port_no * 2;
2480	if (hc > 0)
2481		shift++;
2482
2483	mask = 0x3 << shift;
2484
2485	/* disable assertion of portN err, done events */
2486	tmp = readl(hpriv->main_mask_reg_addr);
2487	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
2488}
2489
2490static void mv_eh_thaw(struct ata_port *ap)
2491{
2492	struct mv_host_priv *hpriv = ap->host->private_data;
2493	void __iomem *mmio = hpriv->base;
2494	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2495	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2496	void __iomem *port_mmio = mv_ap_base(ap);
2497	u32 tmp, mask, hc_irq_cause;
2498	unsigned int shift, hc_port_no = ap->port_no;
2499
2500	/* FIXME: handle coalescing completion events properly */
2501
2502	shift = ap->port_no * 2;
2503	if (hc > 0) {
2504		shift++;
2505		hc_port_no -= 4;
2506	}
2507
2508	mask = 0x3 << shift;
2509
2510	/* clear EDMA errors on this port */
2511	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2512
2513	/* clear pending irq events */
2514	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2515	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
2516	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2517	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2518
2519	/* enable assertion of portN err, done events */
2520	tmp = readl(hpriv->main_mask_reg_addr);
2521	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2522}
2523
2524/**
2525 *      mv_port_init - Perform some early initialization on a single port.
2526 *      @port: libata data structure storing shadow register addresses
2527 *      @port_mmio: base address of the port
2528 *
2529 *      Initialize shadow register mmio addresses, clear outstanding
2530 *      interrupts on the port, and unmask interrupts for the future
2531 *      start of the port.
2532 *
2533 *      LOCKING:
2534 *      Inherited from caller.
2535 */
2536static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
2537{
2538	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2539	unsigned serr_ofs;
2540
2541	/* PIO related setup
2542	 */
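	/* The shadow register block exposes the taskfile registers in
	 * 32-bit slots, so each libata ATA_REG_* index simply becomes
	 * shd_base + 4 * index (e.g. ATA_REG_DATA maps to shd_base + 0).
	 */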
2543	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2544	port->error_addr =
2545		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2546	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2547	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2548	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2549	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2550	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2551	port->status_addr =
2552		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2553	/* special case: control/altstatus doesn't have ATA_REG_ address */
2554	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2555
2556	/* unused: */
2557	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2558
2559	/* Clear any currently outstanding port interrupt conditions */
2560	serr_ofs = mv_scr_offset(SCR_ERROR);
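	/* SError is write-1-to-clear, so writing back whatever we just read
	 * clears every currently latched error bit.
	 */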
2561	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2562	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2563
2564	/* unmask all non-transient EDMA error interrupts */
2565	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2566
2567	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2568		readl(port_mmio + EDMA_CFG_OFS),
2569		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2570		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2571}
2572
2573static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2574{
2575	struct pci_dev *pdev = to_pci_dev(host->dev);
2576	struct mv_host_priv *hpriv = host->private_data;
2577	u32 hp_flags = hpriv->hp_flags;
2578
2579	switch (board_idx) {
2580	case chip_5080:
2581		hpriv->ops = &mv5xxx_ops;
2582		hp_flags |= MV_HP_GEN_I;
2583
2584		switch (pdev->revision) {
2585		case 0x1:
2586			hp_flags |= MV_HP_ERRATA_50XXB0;
2587			break;
2588		case 0x3:
2589			hp_flags |= MV_HP_ERRATA_50XXB2;
2590			break;
2591		default:
2592			dev_printk(KERN_WARNING, &pdev->dev,
2593			   "Applying 50XXB2 workarounds to unknown rev\n");
2594			hp_flags |= MV_HP_ERRATA_50XXB2;
2595			break;
2596		}
2597		break;
2598
2599	case chip_504x:
2600	case chip_508x:
2601		hpriv->ops = &mv5xxx_ops;
2602		hp_flags |= MV_HP_GEN_I;
2603
2604		switch (pdev->revision) {
2605		case 0x0:
2606			hp_flags |= MV_HP_ERRATA_50XXB0;
2607			break;
2608		case 0x3:
2609			hp_flags |= MV_HP_ERRATA_50XXB2;
2610			break;
2611		default:
2612			dev_printk(KERN_WARNING, &pdev->dev,
2613			   "Applying B2 workarounds to unknown rev\n");
2614			hp_flags |= MV_HP_ERRATA_50XXB2;
2615			break;
2616		}
2617		break;
2618
2619	case chip_604x:
2620	case chip_608x:
2621		hpriv->ops = &mv6xxx_ops;
2622		hp_flags |= MV_HP_GEN_II;
2623
2624		switch (pdev->revision) {
2625		case 0x7:
2626			hp_flags |= MV_HP_ERRATA_60X1B2;
2627			break;
2628		case 0x9:
2629			hp_flags |= MV_HP_ERRATA_60X1C0;
2630			break;
2631		default:
2632			dev_printk(KERN_WARNING, &pdev->dev,
2633				   "Applying B2 workarounds to unknown rev\n");
2634			hp_flags |= MV_HP_ERRATA_60X1B2;
2635			break;
2636		}
2637		break;
2638
2639	case chip_7042:
2640		hp_flags |= MV_HP_PCIE;
2641		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2642		    (pdev->device == 0x2300 || pdev->device == 0x2310))
2643		{
2644			/*
2645			 * Highpoint RocketRAID PCIe 23xx series cards:
2646			 *
2647			 * Unconfigured drives are treated as "Legacy"
2648			 * by the BIOS, and it overwrites sector 8 with
2649			 * a "Lgcy" metadata block prior to Linux boot.
2650			 *
2651			 * Configured drives (RAID or JBOD) leave sector 8
2652			 * alone, but instead overwrite a high numbered
2653			 * sector for the RAID metadata.  This sector can
2654			 * be determined exactly, by truncating the physical
2655			 * drive capacity to a nice even GB value.
2656			 *
2657			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2658			 *
2659			 * Warn the user, lest they think we're just buggy.
2660			 */
2661			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2662				" BIOS CORRUPTS DATA on all attached drives,"
2663				" regardless of if/how they are configured."
2664				" BEWARE!\n");
2665			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2666				" use sectors 8-9 on \"Legacy\" drives,"
2667				" and avoid the final two gigabytes on"
2668				" all RocketRAID BIOS initialized drives.\n");
2669		}
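		/* chip_7042 shares the Gen-IIE setup below: fall through */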
2670	case chip_6042:
2671		hpriv->ops = &mv6xxx_ops;
2672		hp_flags |= MV_HP_GEN_IIE;
2673
2674		switch (pdev->revision) {
2675		case 0x0:
2676			hp_flags |= MV_HP_ERRATA_XX42A0;
2677			break;
2678		case 0x1:
2679			hp_flags |= MV_HP_ERRATA_60X1C0;
2680			break;
2681		default:
2682			dev_printk(KERN_WARNING, &pdev->dev,
2683			   "Applying 60X1C0 workarounds to unknown rev\n");
2684			hp_flags |= MV_HP_ERRATA_60X1C0;
2685			break;
2686		}
2687		break;
2688	case chip_soc:
2689		hpriv->ops = &mv_soc_ops;
2690		hp_flags |= MV_HP_ERRATA_60X1C0;
2691		break;
2692
2693	default:
2694		dev_printk(KERN_ERR, host->dev,
2695			   "BUG: invalid board index %u\n", board_idx);
2696		return 1;
2697	}
2698
2699	hpriv->hp_flags = hp_flags;
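	/* The PCIe flavours keep their PCI interrupt cause/mask registers at
	 * different offsets than the conventional-PCI parts, so record the
	 * correct pair for the rest of the driver to use.
	 */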
2700	if (hp_flags & MV_HP_PCIE) {
2701		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
2702		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
2703		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
2704	} else {
2705		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
2706		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
2707		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
2708	}
2709
2710	return 0;
2711}
2712
2713/**
2714 *      mv_init_host - Perform some early initialization of the host.
2715 *	@host: ATA host to initialize
2716 *      @board_idx: controller index
2717 *
2718 *      If possible, do an early global reset of the host.  Then do
2719 *      our port init and clear/unmask all/relevant host interrupts.
2720 *
2721 *      LOCKING:
2722 *      Inherited from caller.
2723 */
2724static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2725{
2726	int rc = 0, n_hc, port, hc;
2727	struct mv_host_priv *hpriv = host->private_data;
2728	void __iomem *mmio = hpriv->base;
2729
2730	rc = mv_chip_id(host, board_idx);
2731	if (rc)
2732		goto done;
2733
2734	if (HAS_PCI(host)) {
2735		hpriv->main_cause_reg_addr = hpriv->base +
2736		  HC_MAIN_IRQ_CAUSE_OFS;
2737		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2738	} else {
2739		hpriv->main_cause_reg_addr = hpriv->base +
2740		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
2741		hpriv->main_mask_reg_addr = hpriv->base +
2742		  HC_SOC_MAIN_IRQ_MASK_OFS;
2743	}
2744	/* global interrupt mask */
2745	writel(0, hpriv->main_mask_reg_addr);
2746
2747	n_hc = mv_get_hc_count(host->ports[0]->flags);
2748
2749	for (port = 0; port < host->n_ports; port++)
2750		hpriv->ops->read_preamp(hpriv, port, mmio);
2751
2752	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2753	if (rc)
2754		goto done;
2755
2756	hpriv->ops->reset_flash(hpriv, mmio);
2757	hpriv->ops->reset_bus(host, mmio);
2758	hpriv->ops->enable_leds(hpriv, mmio);
2759
2760	for (port = 0; port < host->n_ports; port++) {
2761		if (IS_GEN_II(hpriv)) {
2762			void __iomem *port_mmio = mv_port_base(mmio, port);
2763
2764			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2765			ifctl |= (1 << 7);		/* enable gen2i speed */
2766			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2767			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2768		}
2769
2770		hpriv->ops->phy_errata(hpriv, mmio, port);
2771	}
2772
2773	for (port = 0; port < host->n_ports; port++) {
2774		struct ata_port *ap = host->ports[port];
2775		void __iomem *port_mmio = mv_port_base(mmio, port);
2776
2777		mv_port_init(&ap->ioaddr, port_mmio);
2778
2779#ifdef CONFIG_PCI
2780		if (HAS_PCI(host)) {
2781			unsigned int offset = port_mmio - mmio;
2782			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2783			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2784		}
2785#endif
2786	}
2787
2788	for (hc = 0; hc < n_hc; hc++) {
2789		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2790
2791		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2792			"(before clear)=0x%08x\n", hc,
2793			readl(hc_mmio + HC_CFG_OFS),
2794			readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2795
2796		/* Clear any currently outstanding hc interrupt conditions */
2797		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2798	}
2799
2800	if (HAS_PCI(host)) {
2801		/* Clear any currently outstanding host interrupt conditions */
2802		writelfl(0, mmio + hpriv->irq_cause_ofs);
2803
2804		/* and unmask interrupt generation for host regs */
2805		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2806		if (IS_GEN_I(hpriv))
2807			writelfl(~HC_MAIN_MASKED_IRQS_5,
2808				 hpriv->main_mask_reg_addr);
2809		else
2810			writelfl(~HC_MAIN_MASKED_IRQS,
2811				 hpriv->main_mask_reg_addr);
2812
2813		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2814			"PCI int cause/mask=0x%08x/0x%08x\n",
2815			readl(hpriv->main_cause_reg_addr),
2816			readl(hpriv->main_mask_reg_addr),
2817			readl(mmio + hpriv->irq_cause_ofs),
2818			readl(mmio + hpriv->irq_mask_ofs));
2819	} else {
2820		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2821			 hpriv->main_mask_reg_addr);
2822		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2823			readl(hpriv->main_cause_reg_addr),
2824			readl(hpriv->main_mask_reg_addr));
2825	}
2826done:
2827	return rc;
2828}
2829
2830static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2831{
2832	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2833							     MV_CRQB_Q_SZ, 0);
2834	if (!hpriv->crqb_pool)
2835		return -ENOMEM;
2836
2837	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2838							     MV_CRPB_Q_SZ, 0);
2839	if (!hpriv->crpb_pool)
2840		return -ENOMEM;
2841
2842	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2843							     MV_SG_TBL_SZ, 0);
2844	if (!hpriv->sg_tbl_pool)
2845		return -ENOMEM;
2846
2847	return 0;
2848}
2849
2850/**
2851 *      mv_platform_probe - handle a positive probe of an SoC Marvell
2852 *      host
2853 *      @pdev: platform device found
2854 *
2855 *      LOCKING:
2856 *      Inherited from caller.
2857 */
2858static int mv_platform_probe(struct platform_device *pdev)
2859{
2860	static int printed_version;
2861	const struct mv_sata_platform_data *mv_platform_data;
2862	const struct ata_port_info *ppi[] =
2863	    { &mv_port_info[chip_soc], NULL };
2864	struct ata_host *host;
2865	struct mv_host_priv *hpriv;
2866	struct resource *res;
2867	int n_ports, rc;
2868
2869	if (!printed_version++)
2870		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2871
2872	/*
2873	 * Simple resource validation ..
2874	 */
2875	if (unlikely(pdev->num_resources != 2)) {
2876		dev_err(&pdev->dev, "invalid number of resources\n");
2877		return -EINVAL;
2878	}
2879
2880	/*
2881	 * Get the register base first
2882	 */
2883	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2884	if (res == NULL)
2885		return -EINVAL;
2886
2887	/* allocate host */
2888	mv_platform_data = pdev->dev.platform_data;
2889	n_ports = mv_platform_data->n_ports;
2890
2891	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2892	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2893
2894	if (!host || !hpriv)
2895		return -ENOMEM;
2896	host->private_data = hpriv;
2897	hpriv->n_ports = n_ports;
2898
2899	host->iomap = NULL;
2900	hpriv->base = devm_ioremap(&pdev->dev, res->start,
2901				   res->end - res->start + 1);
	if (!hpriv->base)
		return -ENOMEM;
2902	hpriv->base -= MV_SATAHC0_REG_BASE; /* helpers like mv_port_base() expect the chip base */
2903
2904	rc = mv_create_dma_pools(hpriv, &pdev->dev);
2905	if (rc)
2906		return rc;
2907
2908	/* initialize adapter */
2909	rc = mv_init_host(host, chip_soc);
2910	if (rc)
2911		return rc;
2912
2913	dev_printk(KERN_INFO, &pdev->dev,
2914		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2915		   host->n_ports);
2916
2917	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2918				 IRQF_SHARED, &mv6_sht);
2919}
2920
2921/*
2922 *      mv_platform_remove - unplug a platform interface
2924 *      @pdev: platform device
2925 *
2926 *      A platform bus SATA device has been unplugged. Perform the needed
2927 *      cleanup. Also called on module unload for any active devices.
2928 */
2929static int __devexit mv_platform_remove(struct platform_device *pdev)
2930{
2931	struct device *dev = &pdev->dev;
2932	struct ata_host *host = dev_get_drvdata(dev);
2933
2934	ata_host_detach(host);
2935	return 0;
2936}
2937
2938static struct platform_driver mv_platform_driver = {
2939	.probe			= mv_platform_probe,
2940	.remove			= __devexit_p(mv_platform_remove),
2941	.driver			= {
2942				   .name = DRV_NAME,
2943				   .owner = THIS_MODULE,
2944				  },
2945};
2946
2947
2948#ifdef CONFIG_PCI
2949static int mv_pci_init_one(struct pci_dev *pdev,
2950			   const struct pci_device_id *ent);
2951
2952
2953static struct pci_driver mv_pci_driver = {
2954	.name			= DRV_NAME,
2955	.id_table		= mv_pci_tbl,
2956	.probe			= mv_pci_init_one,
2957	.remove			= ata_pci_remove_one,
2958};
2959
2960/*
2961 * module options
2962 */
2963static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
2964
2965
2966/* move to PCI layer or libata core? */
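/* Prefer a 64-bit DMA mask when the platform supports one; otherwise fall
 * back to 32-bit streaming and consistent masks.
 */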
2967static int pci_go_64(struct pci_dev *pdev)
2968{
2969	int rc;
2970
2971	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2972		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2973		if (rc) {
2974			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2975			if (rc) {
2976				dev_printk(KERN_ERR, &pdev->dev,
2977					   "64-bit DMA enable failed\n");
2978				return rc;
2979			}
2980		}
2981	} else {
2982		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2983		if (rc) {
2984			dev_printk(KERN_ERR, &pdev->dev,
2985				   "32-bit DMA enable failed\n");
2986			return rc;
2987		}
2988		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2989		if (rc) {
2990			dev_printk(KERN_ERR, &pdev->dev,
2991				   "32-bit consistent DMA enable failed\n");
2992			return rc;
2993		}
2994	}
2995
2996	return rc;
2997}
2998
2999/**
3000 *      mv_print_info - Dump key info to kernel log for perusal.
3001 *      @host: ATA host to print info about
3002 *
3003 *      FIXME: complete this.
3004 *
3005 *      LOCKING:
3006 *      Inherited from caller.
3007 */
3008static void mv_print_info(struct ata_host *host)
3009{
3010	struct pci_dev *pdev = to_pci_dev(host->dev);
3011	struct mv_host_priv *hpriv = host->private_data;
3012	u8 scc;
3013	const char *scc_s, *gen;
3014
3015	/* Read the Storage Class Code so we can report whether the chip
3016	 * presents itself as a plain SCSI or a RAID-class device.
3017	 */
3018	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3019	if (scc == 0)
3020		scc_s = "SCSI";
3021	else if (scc == 0x01)
3022		scc_s = "RAID";
3023	else
3024		scc_s = "?";
3025
3026	if (IS_GEN_I(hpriv))
3027		gen = "I";
3028	else if (IS_GEN_II(hpriv))
3029		gen = "II";
3030	else if (IS_GEN_IIE(hpriv))
3031		gen = "IIE";
3032	else
3033		gen = "?";
3034
3035	dev_printk(KERN_INFO, &pdev->dev,
3036	       "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3037	       gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
3038	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3039}
3040
3041/**
3042 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
3043 *      @pdev: PCI device found
3044 *      @ent: PCI device ID entry for the matched host
3045 *
3046 *      LOCKING:
3047 *      Inherited from caller.
3048 */
3049static int mv_pci_init_one(struct pci_dev *pdev,
3050			   const struct pci_device_id *ent)
3051{
3052	static int printed_version;
3053	unsigned int board_idx = (unsigned int)ent->driver_data;
3054	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3055	struct ata_host *host;
3056	struct mv_host_priv *hpriv;
3057	int n_ports, rc;
3058
3059	if (!printed_version++)
3060		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3061
3062	/* allocate host */
3063	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3064
3065	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3066	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3067	if (!host || !hpriv)
3068		return -ENOMEM;
3069	host->private_data = hpriv;
3070	hpriv->n_ports = n_ports;
3071
3072	/* acquire resources */
3073	rc = pcim_enable_device(pdev);
3074	if (rc)
3075		return rc;
3076
3077	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3078	if (rc == -EBUSY)
3079		pcim_pin_device(pdev);
3080	if (rc)
3081		return rc;
3082	host->iomap = pcim_iomap_table(pdev);
3083	hpriv->base = host->iomap[MV_PRIMARY_BAR];
3084
3085	rc = pci_go_64(pdev);
3086	if (rc)
3087		return rc;
3088
3089	rc = mv_create_dma_pools(hpriv, &pdev->dev);
3090	if (rc)
3091		return rc;
3092
3093	/* initialize adapter */
3094	rc = mv_init_host(host, board_idx);
3095	if (rc)
3096		return rc;
3097
3098	/* Enable interrupts */
3099	if (msi && pci_enable_msi(pdev))
3100		pci_intx(pdev, 1);
3101
3102	mv_dump_pci_cfg(pdev, 0x68);
3103	mv_print_info(host);
3104
3105	pci_set_master(pdev);
3106	pci_try_set_mwi(pdev);
3107	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3108				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
3109}
3110#endif
3111
3112static int mv_platform_probe(struct platform_device *pdev);
3113static int __devexit mv_platform_remove(struct platform_device *pdev);
3114
3115static int __init mv_init(void)
3116{
3117	int rc = -ENODEV;
3118#ifdef CONFIG_PCI
3119	rc = pci_register_driver(&mv_pci_driver);
3120	if (rc < 0)
3121		return rc;
3122#endif
3123	rc = platform_driver_register(&mv_platform_driver);
3124
3125#ifdef CONFIG_PCI
3126	if (rc < 0)
3127		pci_unregister_driver(&mv_pci_driver);
3128#endif
3129	return rc;
3130}
3131
3132static void __exit mv_exit(void)
3133{
3134#ifdef CONFIG_PCI
3135	pci_unregister_driver(&mv_pci_driver);
3136#endif
3137	platform_driver_unregister(&mv_platform_driver);
3138}
3139
3140MODULE_AUTHOR("Brett Russ");
3141MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3142MODULE_LICENSE("GPL");
3143MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3144MODULE_VERSION(DRV_VERSION);
3145MODULE_ALIAS("platform:sata_mv");
3146
3147#ifdef CONFIG_PCI
3148module_param(msi, int, 0444);
3149MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
3150#endif
3151
3152module_init(mv_init);
3153module_exit(mv_exit);
3154