libata.h revision 7a801184fa480e11e6431f184a5bdf31f63326fb
1/*
2 *  Copyright 2003-2005 Red Hat, Inc.  All rights reserved.
3 *  Copyright 2003-2005 Jeff Garzik
4 *
5 *
6 *  This program is free software; you can redistribute it and/or modify
7 *  it under the terms of the GNU General Public License as published by
8 *  the Free Software Foundation; either version 2, or (at your option)
9 *  any later version.
10 *
11 *  This program is distributed in the hope that it will be useful,
12 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 *  GNU General Public License for more details.
15 *
16 *  You should have received a copy of the GNU General Public License
17 *  along with this program; see the file COPYING.  If not, write to
18 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19 *
20 *
21 *  libata documentation is available via 'make {ps|pdf}docs',
22 *  as Documentation/DocBook/libata.*
23 *
24 */
25
26#ifndef __LINUX_LIBATA_H__
27#define __LINUX_LIBATA_H__
28
29#include <linux/delay.h>
30#include <linux/interrupt.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <asm/scatterlist.h>
34#include <asm/io.h>
35#include <linux/ata.h>
36#include <linux/workqueue.h>
37#include <scsi/scsi_host.h>
38
39/*
40 * Define if arch has non-standard setup.  This is a _PCI_ standard,
41 * not a legacy or ISA standard.
42 */
43#ifdef CONFIG_ATA_NONSTANDARD
44#include <asm/libata-portmap.h>
45#else
46#include <asm-generic/libata-portmap.h>
47#endif
48
49/*
50 * compile-time options: to be removed as soon as all the drivers are
51 * converted to the new debugging mechanism
52 */
53#undef ATA_DEBUG		/* debugging output */
54#undef ATA_VERBOSE_DEBUG	/* yet more debugging output */
55#undef ATA_IRQ_TRAP		/* define to ack screaming irqs */
56#undef ATA_NDEBUG		/* define to disable quick runtime checks */
57#define ATA_ENABLE_PATA		/* define to enable PATA support in some
58				 * low-level drivers */
59
60
61/* note: prints function name for you */
62#ifdef ATA_DEBUG
63#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
64#ifdef ATA_VERBOSE_DEBUG
65#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
66#else
67#define VPRINTK(fmt, args...)
68#endif	/* ATA_VERBOSE_DEBUG */
69#else
70#define DPRINTK(fmt, args...)
71#define VPRINTK(fmt, args...)
72#endif	/* ATA_DEBUG */
73
74#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
75
76/* NEW: debug levels */
77#define HAVE_LIBATA_MSG 1
78
79enum {
80	ATA_MSG_DRV	= 0x0001,
81	ATA_MSG_INFO	= 0x0002,
82	ATA_MSG_PROBE	= 0x0004,
83	ATA_MSG_WARN	= 0x0008,
84	ATA_MSG_MALLOC	= 0x0010,
85	ATA_MSG_CTL	= 0x0020,
86	ATA_MSG_INTR	= 0x0040,
87	ATA_MSG_ERR	= 0x0080,
88};
89
90#define ata_msg_drv(p)    ((p)->msg_enable & ATA_MSG_DRV)
91#define ata_msg_info(p)   ((p)->msg_enable & ATA_MSG_INFO)
92#define ata_msg_probe(p)  ((p)->msg_enable & ATA_MSG_PROBE)
93#define ata_msg_warn(p)   ((p)->msg_enable & ATA_MSG_WARN)
94#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
95#define ata_msg_ctl(p)    ((p)->msg_enable & ATA_MSG_CTL)
96#define ata_msg_intr(p)   ((p)->msg_enable & ATA_MSG_INTR)
97#define ata_msg_err(p)    ((p)->msg_enable & ATA_MSG_ERR)
98
99static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
100{
101	if (dval < 0 || dval >= (sizeof(u32) * 8))
102		return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
103	if (!dval)
104		return 0;
105	return (1 << dval) - 1;
106}
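/*
 * Illustrative sketch (not part of the original header): translating a
 * numeric debug level into msg_enable bits via ata_msg_init().  A level of
 * 3 yields (1 << 3) - 1 == ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_PROBE, a
 * level of 0 silences everything, and an out-of-range value falls back to
 * the default mask.  The helper name below is hypothetical.
 */
static inline u32 example_msg_bits_from_level(int level)
{
	return ata_msg_init(level, ATA_MSG_DRV);	/* default: driver msgs only */
}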
107
108/* defines only for the constants which don't work well as enums */
109#define ATA_TAG_POISON		0xfafbfcfdU
110
111/* move to PCI layer? */
112#define PCI_VDEVICE(vendor, device)		\
113	PCI_VENDOR_ID_##vendor, (device),	\
114	PCI_ANY_ID, PCI_ANY_ID, 0, 0
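/*
 * Illustrative sketch (not part of the original header): PCI_VDEVICE()
 * supplies the first six initializers of a struct pci_device_id entry, so a
 * driver's ID table stays compact.  The table name and the Intel device ID
 * below are examples only; the trailing field is driver_data.
 */
static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, 0x24d1), 0 },	/* vendor/device, wildcards, driver_data */
	{ }					/* terminate list */
};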
115
116static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
117{
118	return &pdev->dev;
119}
120
121enum {
122	/* various global constants */
123	LIBATA_MAX_PRD		= ATA_MAX_PRD / 2,
124	ATA_MAX_PORTS		= 8,
125	ATA_DEF_QUEUE		= 1,
126	/* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
127	ATA_MAX_QUEUE		= 32,
128	ATA_TAG_INTERNAL	= ATA_MAX_QUEUE - 1,
129	ATA_MAX_BUS		= 2,
130	ATA_DEF_BUSY_WAIT	= 10000,
131	ATA_SHORT_PAUSE		= (HZ >> 6) + 1,
132
133	ATA_SHT_EMULATED	= 1,
134	ATA_SHT_CMD_PER_LUN	= 1,
135	ATA_SHT_THIS_ID		= -1,
136	ATA_SHT_USE_CLUSTERING	= 1,
137
138	/* struct ata_device stuff */
139	ATA_DFLAG_LBA		= (1 << 0), /* device supports LBA */
140	ATA_DFLAG_LBA48		= (1 << 1), /* device supports LBA48 */
141	ATA_DFLAG_CDB_INTR	= (1 << 2), /* device asserts INTRQ when ready for CDB */
142	ATA_DFLAG_NCQ		= (1 << 3), /* device supports NCQ */
143	ATA_DFLAG_FLUSH_EXT	= (1 << 4), /* do FLUSH_EXT instead of FLUSH */
144	ATA_DFLAG_CFG_MASK	= (1 << 8) - 1,
145
146	ATA_DFLAG_PIO		= (1 << 8), /* device limited to PIO mode */
147	ATA_DFLAG_NCQ_OFF	= (1 << 9), /* device limited to non-NCQ mode */
148	ATA_DFLAG_SUSPENDED	= (1 << 10), /* device suspended */
149	ATA_DFLAG_INIT_MASK	= (1 << 16) - 1,
150
151	ATA_DFLAG_DETACH	= (1 << 16),
152	ATA_DFLAG_DETACHED	= (1 << 17),
153
154	ATA_DEV_UNKNOWN		= 0,	/* unknown device */
155	ATA_DEV_ATA		= 1,	/* ATA device */
156	ATA_DEV_ATA_UNSUP	= 2,	/* ATA device (unsupported) */
157	ATA_DEV_ATAPI		= 3,	/* ATAPI device */
158	ATA_DEV_ATAPI_UNSUP	= 4,	/* ATAPI device (unsupported) */
159	ATA_DEV_NONE		= 5,	/* no device */
160
161	/* struct ata_port flags */
162	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
163					    /* (doesn't imply presence) */
164	ATA_FLAG_SATA		= (1 << 1),
165	ATA_FLAG_NO_LEGACY	= (1 << 2), /* no legacy mode check */
166	ATA_FLAG_MMIO		= (1 << 3), /* use MMIO, not PIO */
167	ATA_FLAG_SRST		= (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
168	ATA_FLAG_SATA_RESET	= (1 << 5), /* (obsolete) use COMRESET */
169	ATA_FLAG_NO_ATAPI	= (1 << 6), /* No ATAPI support */
170	ATA_FLAG_PIO_DMA	= (1 << 7), /* PIO cmds via DMA */
171	ATA_FLAG_PIO_LBA48	= (1 << 8), /* Host DMA engine is LBA28 only */
172	ATA_FLAG_PIO_POLLING	= (1 << 9), /* use polling PIO if LLD
173					     * doesn't handle PIO interrupts */
174	ATA_FLAG_NCQ		= (1 << 10), /* host supports NCQ */
175	ATA_FLAG_HRST_TO_RESUME	= (1 << 11), /* hardreset to resume phy */
176	ATA_FLAG_SKIP_D2H_BSY	= (1 << 12), /* can't wait for the first D2H
177					      * Register FIS clearing BSY */
178	ATA_FLAG_DEBUGMSG	= (1 << 13),
179	ATA_FLAG_SETXFER_POLLING= (1 << 14), /* use polling for SETXFER */
180
181	/* The following flag belongs to ap->pflags but is kept in
182	 * ap->flags because it's referenced in many LLDs and will be
183	 * removed in the not-too-distant future.
184	 */
185	ATA_FLAG_DISABLED	= (1 << 23), /* port is disabled, ignore it */
186
187	/* bits 24:31 of ap->flags are reserved for LLD specific flags */
188
189	/* struct ata_port pflags */
190	ATA_PFLAG_EH_PENDING	= (1 << 0), /* EH pending */
191	ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */
192	ATA_PFLAG_FROZEN	= (1 << 2), /* port is frozen */
193	ATA_PFLAG_RECOVERED	= (1 << 3), /* recovery action performed */
194	ATA_PFLAG_LOADING	= (1 << 4), /* boot/loading probe */
195	ATA_PFLAG_UNLOADING	= (1 << 5), /* module is unloading */
196	ATA_PFLAG_SCSI_HOTPLUG	= (1 << 6), /* SCSI hotplug scheduled */
197
198	ATA_PFLAG_FLUSH_PORT_TASK = (1 << 16), /* flush port task */
199	ATA_PFLAG_SUSPENDED	= (1 << 17), /* port is suspended (power) */
200	ATA_PFLAG_PM_PENDING	= (1 << 18), /* PM operation pending */
201
202	/* struct ata_queued_cmd flags */
203	ATA_QCFLAG_ACTIVE	= (1 << 0), /* cmd not yet ack'd to SCSI layer */
204	ATA_QCFLAG_SG		= (1 << 1), /* have s/g table? */
205	ATA_QCFLAG_SINGLE	= (1 << 2), /* no s/g, just a single buffer */
206	ATA_QCFLAG_DMAMAP	= ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
207	ATA_QCFLAG_IO		= (1 << 3), /* standard IO command */
208	ATA_QCFLAG_RESULT_TF	= (1 << 4), /* result TF requested */
209
210	ATA_QCFLAG_FAILED	= (1 << 16), /* cmd failed and is owned by EH */
211	ATA_QCFLAG_SENSE_VALID	= (1 << 17), /* sense data valid */
212	ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
213
214	/* host set flags */
215	ATA_HOST_SIMPLEX	= (1 << 0),	/* Host is simplex, one DMA channel per host only */
216
217	/* various lengths of time */
218	ATA_TMOUT_BOOT		= 30 * HZ,	/* heuristic */
219	ATA_TMOUT_BOOT_QUICK	= 7 * HZ,	/* heuristic */
220	ATA_TMOUT_INTERNAL	= 30 * HZ,
221	ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,
222
223	/* ATA bus states */
224	BUS_UNKNOWN		= 0,
225	BUS_DMA			= 1,
226	BUS_IDLE		= 2,
227	BUS_NOINTR		= 3,
228	BUS_NODATA		= 4,
229	BUS_TIMER		= 5,
230	BUS_PIO			= 6,
231	BUS_EDD			= 7,
232	BUS_IDENTIFY		= 8,
233	BUS_PACKET		= 9,
234
235	/* SATA port states */
236	PORT_UNKNOWN		= 0,
237	PORT_ENABLED		= 1,
238	PORT_DISABLED		= 2,
239
240	/* encoding various smaller bitmaps into a single
241	 * unsigned int bitmap (see the packing sketch after this enum)
242	 */
243	ATA_BITS_PIO		= 7,
244	ATA_BITS_MWDMA		= 5,
245	ATA_BITS_UDMA		= 8,
246
247	ATA_SHIFT_PIO		= 0,
248	ATA_SHIFT_MWDMA		= ATA_SHIFT_PIO + ATA_BITS_PIO,
249	ATA_SHIFT_UDMA		= ATA_SHIFT_MWDMA + ATA_BITS_MWDMA,
250
251	ATA_MASK_PIO		= ((1 << ATA_BITS_PIO) - 1) << ATA_SHIFT_PIO,
252	ATA_MASK_MWDMA		= ((1 << ATA_BITS_MWDMA) - 1) << ATA_SHIFT_MWDMA,
253	ATA_MASK_UDMA		= ((1 << ATA_BITS_UDMA) - 1) << ATA_SHIFT_UDMA,
254
255	/* size of buffer to pad xfers ending on unaligned boundaries */
256	ATA_DMA_PAD_SZ		= 4,
257	ATA_DMA_PAD_BUF_SZ	= ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,
258
259	/* masks for port functions */
260	ATA_PORT_PRIMARY	= (1 << 0),
261	ATA_PORT_SECONDARY	= (1 << 1),
262
263	/* ering size */
264	ATA_ERING_SIZE		= 32,
265
266	/* desc_len for ata_eh_info and context */
267	ATA_EH_DESC_LEN		= 80,
268
269	/* reset / recovery action types */
270	ATA_EH_REVALIDATE	= (1 << 0),
271	ATA_EH_SOFTRESET	= (1 << 1),
272	ATA_EH_HARDRESET	= (1 << 2),
273	ATA_EH_SUSPEND		= (1 << 3),
274	ATA_EH_RESUME		= (1 << 4),
275	ATA_EH_PM_FREEZE	= (1 << 5),
276
277	ATA_EH_RESET_MASK	= ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
278	ATA_EH_PERDEV_MASK	= ATA_EH_REVALIDATE | ATA_EH_SUSPEND |
279				  ATA_EH_RESUME | ATA_EH_PM_FREEZE,
280
281	/* ata_eh_info->flags */
282	ATA_EHI_HOTPLUGGED	= (1 << 0),  /* could have been hotplugged */
283	ATA_EHI_RESUME_LINK	= (1 << 1),  /* resume link (reset modifier) */
284	ATA_EHI_NO_AUTOPSY	= (1 << 2),  /* no autopsy */
285	ATA_EHI_QUIET		= (1 << 3),  /* be quiet */
286
287	ATA_EHI_DID_RESET	= (1 << 16), /* already reset this port */
288	ATA_EHI_PRINTINFO	= (1 << 17), /* print configuration info */
289	ATA_EHI_SETMODE		= (1 << 18), /* configure transfer mode */
290	ATA_EHI_POST_SETMODE	= (1 << 19), /* revalidating after setmode */
291
292	ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK,
293
294	/* max repeat if error condition is still set after ->error_handler */
295	ATA_EH_MAX_REPEAT	= 5,
296
297	/* how hard are we gonna try to probe/recover devices */
298	ATA_PROBE_MAX_TRIES	= 3,
299	ATA_EH_RESET_TRIES	= 3,
300	ATA_EH_DEV_TRIES	= 3,
301
302	/* Drive spinup time (time from power-on to the first D2H FIS)
303	 * in msecs - 8s currently.  Failing to get ready in this time
304	 * isn't critical.  It will result in reset failure for
305	 * controllers which can't wait for the first D2H FIS.  libata
306	 * will retry, so it just has to be long enough to spin up
307	 * most devices.
308	 */
309	ATA_SPINUP_WAIT		= 8000,
310
311	/* Horkage types. May be set by libata or controller on drives
312	   (some horkage may be drive/controller pair dependent) */
313
314	ATA_HORKAGE_DIAGNOSTIC	= (1 << 0),	/* Failed boot diag */
315	ATA_HORKAGE_NODMA	= (1 << 1),	/* DMA problems */
316	ATA_HORKAGE_NONCQ	= (1 << 2),	/* Don't use NCQ */
317};
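/*
 * Illustrative sketch (not part of the original header), referenced from the
 * "encoding various smaller bitmaps" comment above: how per-type transfer
 * mode masks are packed into one unsigned int and unpacked again using the
 * ATA_SHIFT_* / ATA_MASK_* constants.  The helper names are hypothetical.
 */
static inline unsigned int example_pack_xfermask(unsigned int pio_mask,
						 unsigned int mwdma_mask,
						 unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
	       ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
	       ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

static inline unsigned int example_unpack_udma(unsigned int xfer_mask)
{
	return (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}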
318
319enum hsm_task_states {
320	HSM_ST_IDLE,		/* no command in progress */
321	HSM_ST,			/* (waiting for the device to) transfer data */
322	HSM_ST_LAST,		/* (waiting for the device to) complete command */
323	HSM_ST_ERR,		/* error */
324	HSM_ST_FIRST,		/* (waiting for the device to)
325				   write CDB or first data block */
326};
327
328enum ata_completion_errors {
329	AC_ERR_DEV		= (1 << 0), /* device reported error */
330	AC_ERR_HSM		= (1 << 1), /* host state machine violation */
331	AC_ERR_TIMEOUT		= (1 << 2), /* timeout */
332	AC_ERR_MEDIA		= (1 << 3), /* media error */
333	AC_ERR_ATA_BUS		= (1 << 4), /* ATA bus error */
334	AC_ERR_HOST_BUS		= (1 << 5), /* host bus error */
335	AC_ERR_SYSTEM		= (1 << 6), /* system error */
336	AC_ERR_INVALID		= (1 << 7), /* invalid argument */
337	AC_ERR_OTHER		= (1 << 8), /* unknown */
338	AC_ERR_NODEV_HINT	= (1 << 9), /* polling device detection hint */
339};
340
341/* forward declarations */
342struct scsi_device;
343struct ata_port_operations;
344struct ata_port;
345struct ata_queued_cmd;
346
347/* typedefs */
348typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
349typedef int (*ata_prereset_fn_t)(struct ata_port *ap);
350typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes);
351typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes);
352
353struct ata_ioports {
354	unsigned long		cmd_addr;
355	unsigned long		data_addr;
356	unsigned long		error_addr;
357	unsigned long		feature_addr;
358	unsigned long		nsect_addr;
359	unsigned long		lbal_addr;
360	unsigned long		lbam_addr;
361	unsigned long		lbah_addr;
362	unsigned long		device_addr;
363	unsigned long		status_addr;
364	unsigned long		command_addr;
365	unsigned long		altstatus_addr;
366	unsigned long		ctl_addr;
367	unsigned long		bmdma_addr;
368	unsigned long		scr_addr;
369};
370
371struct ata_probe_ent {
372	struct list_head	node;
373	struct device 		*dev;
374	const struct ata_port_operations *port_ops;
375	struct scsi_host_template *sht;
376	struct ata_ioports	port[ATA_MAX_PORTS];
377	unsigned int		n_ports;
378	unsigned int		dummy_port_mask;
379	unsigned int		pio_mask;
380	unsigned int		mwdma_mask;
381	unsigned int		udma_mask;
382	unsigned long		irq;
383	unsigned long		irq2;
384	unsigned int		irq_flags;
385	unsigned long		port_flags;
386	unsigned long		_host_flags;
387	void __iomem		*mmio_base;
388	void			*private_data;
389
390	/* port_info for the secondary port.  Together with irq2, it's
391	 * used to implement non-uniform secondary port.  Currently,
392	 * the only user is ata_piix combined mode.  This workaround
393	 * will be removed together with ata_probe_ent when init model
394	 * is updated.
395	 */
396	const struct ata_port_info *pinfo2;
397};
398
399struct ata_host {
400	spinlock_t		lock;
401	struct device 		*dev;
402	unsigned long		irq;
403	unsigned long		irq2;
404	void __iomem		*mmio_base;
405	unsigned int		n_ports;
406	void			*private_data;
407	const struct ata_port_operations *ops;
408	unsigned long		flags;
409	int			simplex_claimed;	/* Keep separate in case we
410							   ever need to do this locked */
411	struct ata_port		*ports[0];
412};
413
414struct ata_queued_cmd {
415	struct ata_port		*ap;
416	struct ata_device	*dev;
417
418	struct scsi_cmnd	*scsicmd;
419	void			(*scsidone)(struct scsi_cmnd *);
420
421	struct ata_taskfile	tf;
422	u8			cdb[ATAPI_CDB_LEN];
423
424	unsigned long		flags;		/* ATA_QCFLAG_xxx */
425	unsigned int		tag;
426	unsigned int		n_elem;
427	unsigned int		orig_n_elem;
428
429	int			dma_dir;
430
431	unsigned int		pad_len;
432
433	unsigned int		nsect;
434	unsigned int		cursect;
435
436	unsigned int		nbytes;
437	unsigned int		curbytes;
438
439	unsigned int		cursg;
440	unsigned int		cursg_ofs;
441
442	struct scatterlist	sgent;
443	struct scatterlist	pad_sgent;
444	void			*buf_virt;
445
446	/* DO NOT iterate over __sg manually, use ata_for_each_sg() */
447	struct scatterlist	*__sg;
448
449	unsigned int		err_mask;
450	struct ata_taskfile	result_tf;
451	ata_qc_cb_t		complete_fn;
452
453	void			*private_data;
454};
455
456struct ata_port_stats {
457	unsigned long		unhandled_irq;
458	unsigned long		idle_irq;
459	unsigned long		rw_reqbuf;
460};
461
462struct ata_ering_entry {
463	int			is_io;
464	unsigned int		err_mask;
465	u64			timestamp;
466};
467
468struct ata_ering {
469	int			cursor;
470	struct ata_ering_entry	ring[ATA_ERING_SIZE];
471};
472
473struct ata_device {
474	struct ata_port		*ap;
475	unsigned int		devno;		/* 0 or 1 */
476	unsigned long		flags;		/* ATA_DFLAG_xxx */
477	struct scsi_device	*sdev;		/* attached SCSI device */
478	/* n_sectors is used as CLEAR_OFFSET, see the comment above ATA_DEVICE_CLEAR_OFFSET */
479	u64			n_sectors;	/* size of device, if ATA */
480	unsigned int		class;		/* ATA_DEV_xxx */
481	u16			id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
482	u8			pio_mode;
483	u8			dma_mode;
484	u8			xfer_mode;
485	unsigned int		xfer_shift;	/* ATA_SHIFT_xxx */
486
487	unsigned int		multi_count;	/* sector count for
488						   READ/WRITE MULTIPLE */
489	unsigned int		max_sectors;	/* per-device max sectors */
490	unsigned int		cdb_len;
491
492	/* per-dev xfer mask */
493	unsigned int		pio_mask;
494	unsigned int		mwdma_mask;
495	unsigned int		udma_mask;
496
497	/* for CHS addressing */
498	u16			cylinders;	/* Number of cylinders */
499	u16			heads;		/* Number of heads */
500	u16			sectors;	/* Number of sectors per track */
501
502	/* error history */
503	struct ata_ering	ering;
504	unsigned int		horkage;	/* List of broken features */
505};
506
507/* Offset into struct ata_device.  Fields above it are maintained
508 * across device init.  Fields below are zeroed.
509 */
510#define ATA_DEVICE_CLEAR_OFFSET		offsetof(struct ata_device, n_sectors)
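/*
 * Illustrative sketch (not part of the original header): the intended use of
 * ATA_DEVICE_CLEAR_OFFSET when (re-)initializing a device - fields from
 * n_sectors onwards are zeroed, fields before it survive.  Assumes memset()
 * is available (<linux/string.h>); the helper name is hypothetical.
 */
static inline void example_clear_dev_state(struct ata_device *dev)
{
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
}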
511
512struct ata_eh_info {
513	struct ata_device	*dev;		/* offending device */
514	u32			serror;		/* SError from LLDD */
515	unsigned int		err_mask;	/* port-wide err_mask */
516	unsigned int		action;		/* ATA_EH_* action mask */
517	unsigned int		dev_action[ATA_MAX_DEVICES]; /* dev EH action */
518	unsigned int		flags;		/* ATA_EHI_* flags */
519
520	unsigned long		hotplug_timestamp;
521	unsigned int		probe_mask;
522
523	char			desc[ATA_EH_DESC_LEN];
524	int			desc_len;
525};
526
527struct ata_eh_context {
528	struct ata_eh_info	i;
529	int			tries[ATA_MAX_DEVICES];
530	unsigned int		classes[ATA_MAX_DEVICES];
531	unsigned int		did_probe_mask;
532};
533
534struct ata_port {
535	struct Scsi_Host	*scsi_host; /* our co-allocated scsi host */
536	const struct ata_port_operations *ops;
537	spinlock_t		*lock;
538	unsigned long		flags;	/* ATA_FLAG_xxx */
539	unsigned int		pflags; /* ATA_PFLAG_xxx */
540	unsigned int		id;	/* unique id req'd by SCSI midlayer */
541	unsigned int		port_no; /* unique port #; from zero */
542
543	struct ata_prd		*prd;	 /* our SG list */
544	dma_addr_t		prd_dma; /* and its DMA mapping */
545
546	void			*pad;	/* array of DMA pad buffers */
547	dma_addr_t		pad_dma;
548
549	struct ata_ioports	ioaddr;	/* ATA cmd/ctl/dma register blocks */
550
551	u8			ctl;	/* cache of ATA control register */
552	u8			last_ctl;	/* Cache last written value */
553	unsigned int		pio_mask;
554	unsigned int		mwdma_mask;
555	unsigned int		udma_mask;
556	unsigned int		cbl;	/* cable type; ATA_CBL_xxx */
557	unsigned int		hw_sata_spd_limit;
558	unsigned int		sata_spd_limit;	/* SATA PHY speed limit */
559
560	/* record runtime error info, protected by host lock */
561	struct ata_eh_info	eh_info;
562	/* EH context owned by EH */
563	struct ata_eh_context	eh_context;
564
565	struct ata_device	device[ATA_MAX_DEVICES];
566
567	struct ata_queued_cmd	qcmd[ATA_MAX_QUEUE];
568	unsigned long		qc_allocated;
569	unsigned int		qc_active;
570
571	unsigned int		active_tag;
572	u32			sactive;
573
574	struct ata_port_stats	stats;
575	struct ata_host		*host;
576	struct device 		*dev;
577
578	void			*port_task_data;
579	struct delayed_work	port_task;
580	struct delayed_work	hotplug_task;
581	struct work_struct	scsi_rescan_task;
582
583	unsigned int		hsm_task_state;
584
585	u32			msg_enable;
586	struct list_head	eh_done_q;
587	wait_queue_head_t	eh_wait_q;
588
589	pm_message_t		pm_mesg;
590	int			*pm_result;
591
592	void			*private_data;
593
594	u8			sector_buf[ATA_SECT_SIZE]; /* owned by EH */
595};
596
597struct ata_port_operations {
598	void (*port_disable) (struct ata_port *);
599
600	void (*dev_config) (struct ata_port *, struct ata_device *);
601
602	void (*set_piomode) (struct ata_port *, struct ata_device *);
603	void (*set_dmamode) (struct ata_port *, struct ata_device *);
604	unsigned long (*mode_filter) (const struct ata_port *, struct ata_device *, unsigned long);
605
606	void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
607	void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
608
609	void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf);
610	u8   (*check_status)(struct ata_port *ap);
611	u8   (*check_altstatus)(struct ata_port *ap);
612	void (*dev_select)(struct ata_port *ap, unsigned int device);
613
614	void (*phy_reset) (struct ata_port *ap); /* obsolete */
615	void (*set_mode) (struct ata_port *ap);
616
617	void (*post_set_mode) (struct ata_port *ap);
618
619	int (*check_atapi_dma) (struct ata_queued_cmd *qc);
620
621	void (*bmdma_setup) (struct ata_queued_cmd *qc);
622	void (*bmdma_start) (struct ata_queued_cmd *qc);
623
624	void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
625
626	void (*qc_prep) (struct ata_queued_cmd *qc);
627	unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
628
629	/* Error handlers.  ->error_handler overrides ->eng_timeout and
630	 * indicates that new-style EH is in place.
631	 */
632	void (*eng_timeout) (struct ata_port *ap); /* obsolete */
633
634	void (*freeze) (struct ata_port *ap);
635	void (*thaw) (struct ata_port *ap);
636	void (*error_handler) (struct ata_port *ap);
637	void (*post_internal_cmd) (struct ata_queued_cmd *qc);
638
639	irq_handler_t irq_handler;
640	void (*irq_clear) (struct ata_port *);
641
642	u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg);
643	void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
644			   u32 val);
645
646	int (*port_suspend) (struct ata_port *ap, pm_message_t mesg);
647	int (*port_resume) (struct ata_port *ap);
648
649	int (*port_start) (struct ata_port *ap);
650	void (*port_stop) (struct ata_port *ap);
651
652	void (*host_stop) (struct ata_host *host);
653
654	void (*bmdma_stop) (struct ata_queued_cmd *qc);
655	u8   (*bmdma_status) (struct ata_port *ap);
656};
657
658struct ata_port_info {
659	struct scsi_host_template	*sht;
660	unsigned long		flags;
661	unsigned long		pio_mask;
662	unsigned long		mwdma_mask;
663	unsigned long		udma_mask;
664	const struct ata_port_operations *port_ops;
665	void 			*private_data;
666};
667
668struct ata_timing {
669	unsigned short mode;		/* ATA mode */
670	unsigned short setup;		/* t1 */
671	unsigned short act8b;		/* t2 for 8-bit I/O */
672	unsigned short rec8b;		/* t2i for 8-bit I/O */
673	unsigned short cyc8b;		/* t0 for 8-bit I/O */
674	unsigned short active;		/* t2 or tD */
675	unsigned short recover;		/* t2i or tK */
676	unsigned short cycle;		/* t0 */
677	unsigned short udma;		/* t2CYCTYP/2 */
678};
679
680#define FIT(v,vmin,vmax)	max_t(short,min_t(short,v,vmax),vmin)
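/*
 * Illustrative sketch (not part of the original header): FIT() clamps a
 * timing value into a [min, max] window.  The cycle-time limits below are
 * hypothetical controller constraints, not values from this header.
 */
static inline unsigned short example_clamp_cycle(unsigned short cycle)
{
	return FIT(cycle, 20, 290);	/* keep t0 within the programmable range */
}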
681
682extern const unsigned long sata_deb_timing_normal[];
683extern const unsigned long sata_deb_timing_hotplug[];
684extern const unsigned long sata_deb_timing_long[];
685
686extern const struct ata_port_operations ata_dummy_port_ops;
687
688static inline const unsigned long *
689sata_ehc_deb_timing(struct ata_eh_context *ehc)
690{
691	if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
692		return sata_deb_timing_hotplug;
693	else
694		return sata_deb_timing_normal;
695}
696
697static inline int ata_port_is_dummy(struct ata_port *ap)
698{
699	return ap->ops == &ata_dummy_port_ops;
700}
701
702extern void ata_port_probe(struct ata_port *);
703extern void __sata_phy_reset(struct ata_port *ap);
704extern void sata_phy_reset(struct ata_port *ap);
705extern void ata_bus_reset(struct ata_port *ap);
706extern int sata_set_spd(struct ata_port *ap);
707extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param);
708extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param);
709extern int ata_std_prereset(struct ata_port *ap);
710extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
711extern int sata_port_hardreset(struct ata_port *ap,
712			       const unsigned long *timing);
713extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
714extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
715extern void ata_port_disable(struct ata_port *);
716extern void ata_std_ports(struct ata_ioports *ioaddr);
717#ifdef CONFIG_PCI
718extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
719			     unsigned int n_ports);
720extern void ata_pci_remove_one (struct pci_dev *pdev);
721extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg);
722extern void ata_pci_device_do_resume(struct pci_dev *pdev);
723extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
724extern int ata_pci_device_resume(struct pci_dev *pdev);
725extern int ata_pci_clear_simplex(struct pci_dev *pdev);
726#endif /* CONFIG_PCI */
727extern int ata_device_add(const struct ata_probe_ent *ent);
728extern void ata_port_detach(struct ata_port *ap);
729extern void ata_host_init(struct ata_host *, struct device *,
730			  unsigned long, const struct ata_port_operations *);
731extern void ata_host_remove(struct ata_host *host);
732extern int ata_scsi_detect(struct scsi_host_template *sht);
733extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
734extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
735extern int ata_scsi_release(struct Scsi_Host *host);
736extern void ata_sas_port_destroy(struct ata_port *);
737extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
738					   struct ata_port_info *, struct Scsi_Host *);
739extern int ata_sas_port_init(struct ata_port *);
740extern int ata_sas_port_start(struct ata_port *ap);
741extern void ata_sas_port_stop(struct ata_port *ap);
742extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
743extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
744			    struct ata_port *ap);
745extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
746extern int sata_scr_valid(struct ata_port *ap);
747extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
748extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
749extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
750extern int ata_port_online(struct ata_port *ap);
751extern int ata_port_offline(struct ata_port *ap);
752extern int ata_scsi_device_resume(struct scsi_device *);
753extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t mesg);
754extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
755extern void ata_host_resume(struct ata_host *host);
756extern int ata_ratelimit(void);
757extern int ata_busy_sleep(struct ata_port *ap,
758			  unsigned long timeout_pat, unsigned long timeout);
759extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn,
760				void *data, unsigned long delay);
761extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
762			     unsigned long interval_msec,
763			     unsigned long timeout_msec);
764
765/*
766 * Default driver ops implementations
767 */
768extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
769extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
770extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp);
771extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
772extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device);
773extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
774extern u8 ata_check_status(struct ata_port *ap);
775extern u8 ata_altstatus(struct ata_port *ap);
776extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
777extern int ata_port_start (struct ata_port *ap);
778extern void ata_port_stop (struct ata_port *ap);
779extern void ata_host_stop (struct ata_host *host);
780extern irqreturn_t ata_interrupt (int irq, void *dev_instance);
781extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
782			       unsigned int buflen, int write_data);
783extern void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
784			      unsigned int buflen, int write_data);
785extern void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
786			      unsigned int buflen, int write_data);
787extern void ata_qc_prep(struct ata_queued_cmd *qc);
788extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
789extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
790extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
791		unsigned int buflen);
792extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
793		 unsigned int n_elem);
794extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
795extern void ata_id_string(const u16 *id, unsigned char *s,
796			  unsigned int ofs, unsigned int len);
797extern void ata_id_c_string(const u16 *id, unsigned char *s,
798			    unsigned int ofs, unsigned int len);
799extern unsigned long ata_device_blacklisted(const struct ata_device *dev);
800extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
801extern void ata_bmdma_start (struct ata_queued_cmd *qc);
802extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
803extern u8   ata_bmdma_status(struct ata_port *ap);
804extern void ata_bmdma_irq_clear(struct ata_port *ap);
805extern void ata_bmdma_freeze(struct ata_port *ap);
806extern void ata_bmdma_thaw(struct ata_port *ap);
807extern void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
808			       ata_reset_fn_t softreset,
809			       ata_reset_fn_t hardreset,
810			       ata_postreset_fn_t postreset);
811extern void ata_bmdma_error_handler(struct ata_port *ap);
812extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
813extern int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
814			u8 status, int in_wq);
815extern void ata_qc_complete(struct ata_queued_cmd *qc);
816extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
817				    void (*finish_qc)(struct ata_queued_cmd *));
818extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
819			      void (*done)(struct scsi_cmnd *));
820extern int ata_std_bios_param(struct scsi_device *sdev,
821			      struct block_device *bdev,
822			      sector_t capacity, int geom[]);
823extern int ata_scsi_slave_config(struct scsi_device *sdev);
824extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
825extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
826				       int queue_depth);
827extern struct ata_device *ata_dev_pair(struct ata_device *adev);
828
829/*
830 * Timing helpers
831 */
832
833extern unsigned int ata_pio_need_iordy(const struct ata_device *);
834extern int ata_timing_compute(struct ata_device *, unsigned short,
835			      struct ata_timing *, int, int);
836extern void ata_timing_merge(const struct ata_timing *,
837			     const struct ata_timing *, struct ata_timing *,
838			     unsigned int);
839
840enum {
841	ATA_TIMING_SETUP	= (1 << 0),
842	ATA_TIMING_ACT8B	= (1 << 1),
843	ATA_TIMING_REC8B	= (1 << 2),
844	ATA_TIMING_CYC8B	= (1 << 3),
845	ATA_TIMING_8BIT		= ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
846				  ATA_TIMING_CYC8B,
847	ATA_TIMING_ACTIVE	= (1 << 4),
848	ATA_TIMING_RECOVER	= (1 << 5),
849	ATA_TIMING_CYCLE	= (1 << 6),
850	ATA_TIMING_UDMA		= (1 << 7),
851	ATA_TIMING_ALL		= ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
852				  ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
853				  ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
854				  ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
855};
856
857
858#ifdef CONFIG_PCI
859struct pci_bits {
860	unsigned int		reg;	/* PCI config register to read */
861	unsigned int		width;	/* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
862	unsigned long		mask;
863	unsigned long		val;
864};
865
866extern void ata_pci_host_stop (struct ata_host *host);
867extern struct ata_probe_ent *
868ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
869extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
870extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
871#endif /* CONFIG_PCI */
872
873/*
874 * EH
875 */
876extern void ata_eng_timeout(struct ata_port *ap);
877
878extern void ata_port_schedule_eh(struct ata_port *ap);
879extern int ata_port_abort(struct ata_port *ap);
880extern int ata_port_freeze(struct ata_port *ap);
881
882extern void ata_eh_freeze_port(struct ata_port *ap);
883extern void ata_eh_thaw_port(struct ata_port *ap);
884
885extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
886extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
887
888extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
889		      ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
890		      ata_postreset_fn_t postreset);
891
892/*
893 * printk helpers
894 */
895#define ata_port_printk(ap, lv, fmt, args...) \
896	printk(lv"ata%u: "fmt, (ap)->id , ##args)
897
898#define ata_dev_printk(dev, lv, fmt, args...) \
899	printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)
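/*
 * Illustrative sketch (not part of the original header): typical use of the
 * printk helpers from a hypothetical LLD error path.  ata_port_printk()
 * prefixes the message with "ataN:", ata_dev_printk() with "ataN.DD:".
 */
static inline void example_report_timeout(struct ata_device *dev)
{
	ata_port_printk(dev->ap, KERN_WARNING, "command timed out\n");
	ata_dev_printk(dev, KERN_ERR, "failing with err_mask 0x%x\n",
		       AC_ERR_TIMEOUT);
}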
900
901/*
902 * ata_eh_info helpers
903 */
904#define ata_ehi_push_desc(ehi, fmt, args...) do { \
905	(ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
906				     ATA_EH_DESC_LEN - (ehi)->desc_len, \
907				     fmt , ##args); \
908} while (0)
909
910#define ata_ehi_clear_desc(ehi) do { \
911	(ehi)->desc[0] = '\0'; \
912	(ehi)->desc_len = 0; \
913} while (0)
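/*
 * Illustrative sketch (not part of the original header): building up the EH
 * description string that ends up in the error handler's report.  The SError
 * value and wording are examples only.
 */
static inline void example_describe_error(struct ata_eh_info *ehi, u32 serror)
{
	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "SError 0x%08x", serror);
	ata_ehi_push_desc(ehi, ", hard resetting link");
}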
914
915static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi)
916{
917	if (ehi->flags & ATA_EHI_HOTPLUGGED)
918		return;
919
920	ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK;
921	ehi->hotplug_timestamp = jiffies;
922
923	ehi->action |= ATA_EH_SOFTRESET;
924	ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
925}
926
927static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
928{
929	__ata_ehi_hotplugged(ehi);
930	ehi->err_mask |= AC_ERR_ATA_BUS;
931}
932
933/*
934 * qc helpers
935 */
936static inline int
937ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
938{
939	if (sg == &qc->pad_sgent)
940		return 1;
941	if (qc->pad_len)
942		return 0;
943	if (((sg - qc->__sg) + 1) == qc->n_elem)
944		return 1;
945	return 0;
946}
947
948static inline struct scatterlist *
949ata_qc_first_sg(struct ata_queued_cmd *qc)
950{
951	if (qc->n_elem)
952		return qc->__sg;
953	if (qc->pad_len)
954		return &qc->pad_sgent;
955	return NULL;
956}
957
958static inline struct scatterlist *
959ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
960{
961	if (sg == &qc->pad_sgent)
962		return NULL;
963	if (++sg - qc->__sg < qc->n_elem)
964		return sg;
965	if (qc->pad_len)
966		return &qc->pad_sgent;
967	return NULL;
968}
969
970#define ata_for_each_sg(sg, qc) \
971	for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))
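/*
 * Illustrative sketch (not part of the original header): walking a command's
 * scatter/gather table the intended way; ata_for_each_sg() also visits the
 * DMA pad entry when pad_len is non-zero.  The helper name is hypothetical.
 */
static inline unsigned int example_total_sg_len(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	unsigned int len = 0;

	ata_for_each_sg(sg, qc)
		len += sg->length;

	return len;
}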
972
973static inline unsigned int ata_tag_valid(unsigned int tag)
974{
975	return (tag < ATA_MAX_QUEUE) ? 1 : 0;
976}
977
978static inline unsigned int ata_tag_internal(unsigned int tag)
979{
980	return tag == ATA_MAX_QUEUE - 1;
981}
982
983/*
984 * device helpers
985 */
986static inline unsigned int ata_class_enabled(unsigned int class)
987{
988	return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
989}
990
991static inline unsigned int ata_class_disabled(unsigned int class)
992{
993	return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
994}
995
996static inline unsigned int ata_class_absent(unsigned int class)
997{
998	return !ata_class_enabled(class) && !ata_class_disabled(class);
999}
1000
1001static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
1002{
1003	return ata_class_enabled(dev->class);
1004}
1005
1006static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
1007{
1008	return ata_class_disabled(dev->class);
1009}
1010
1011static inline unsigned int ata_dev_absent(const struct ata_device *dev)
1012{
1013	return ata_class_absent(dev->class);
1014}
1015
1016static inline unsigned int ata_dev_ready(const struct ata_device *dev)
1017{
1018	return ata_dev_enabled(dev) && !(dev->flags & ATA_DFLAG_SUSPENDED);
1019}
1020
1021/*
1022 * port helpers
1023 */
1024static inline int ata_port_max_devices(const struct ata_port *ap)
1025{
1026	if (ap->flags & ATA_FLAG_SLAVE_POSS)
1027		return 2;
1028	return 1;
1029}
1030
1031
1032static inline u8 ata_chk_status(struct ata_port *ap)
1033{
1034	return ap->ops->check_status(ap);
1035}
1036
1037
1038/**
1039 *	ata_pause - Flush writes and pause 400 nanoseconds.
1040 *	@ap: Port to wait for.
1041 *
1042 *	LOCKING:
1043 *	Inherited from caller.
1044 */
1045
1046static inline void ata_pause(struct ata_port *ap)
1047{
1048	ata_altstatus(ap);
1049	ndelay(400);
1050}
1051
1052
1053/**
1054 *	ata_busy_wait - Wait for a port status register
1055 *	@ap: Port to wait for.
1056 *
1057 *	Waits up to max*10 microseconds for the selected bits in the port's
1058 *	status register to be cleared.
1059 *	Returns final value of status register.
1060 *
1061 *	LOCKING:
1062 *	Inherited from caller.
1063 */
1064
1065static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits,
1066			       unsigned int max)
1067{
1068	u8 status;
1069
1070	do {
1071		udelay(10);
1072		status = ata_chk_status(ap);
1073		max--;
1074	} while (status != 0xff && (status & bits) && (max > 0));
1075
1076	return status;
1077}
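/*
 * Illustrative sketch (not part of the original header): ata_busy_wait()
 * polls in 10 microsecond steps, so waiting up to roughly one millisecond
 * for BSY to drop means max = 100.  The helper name is hypothetical.
 */
static inline u8 example_wait_not_busy_1ms(struct ata_port *ap)
{
	return ata_busy_wait(ap, ATA_BUSY, 100);
}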
1078
1079
1080/**
1081 *	ata_wait_idle - Wait for a port to be idle.
1082 *	@ap: Port to wait for.
1083 *
1084 *	Waits up to 10ms for port's BUSY and DRQ signals to clear.
1085 *	Returns final value of status register.
1086 *
1087 *	LOCKING:
1088 *	Inherited from caller.
1089 */
1090
1091static inline u8 ata_wait_idle(struct ata_port *ap)
1092{
1093	u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
1094
1095	if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) {
1096		unsigned long l = ap->ioaddr.status_addr;
1097		if (ata_msg_warn(ap))
1098			printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
1099				status, l);
1100	}
1101
1102	return status;
1103}
1104
1105static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
1106{
1107	qc->tf.ctl |= ATA_NIEN;
1108}
1109
1110static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
1111						       unsigned int tag)
1112{
1113	if (likely(ata_tag_valid(tag)))
1114		return &ap->qcmd[tag];
1115	return NULL;
1116}
1117
1118static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
1119						     unsigned int tag)
1120{
1121	struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1122
1123	if (unlikely(!qc) || !ap->ops->error_handler)
1124		return qc;
1125
1126	if ((qc->flags & (ATA_QCFLAG_ACTIVE |
1127			  ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
1128		return qc;
1129
1130	return NULL;
1131}
1132
1133static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
1134{
1135	memset(tf, 0, sizeof(*tf));
1136
1137	tf->ctl = dev->ap->ctl;
1138	if (dev->devno == 0)
1139		tf->device = ATA_DEVICE_OBS;
1140	else
1141		tf->device = ATA_DEVICE_OBS | ATA_DEV1;
1142}
1143
1144static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
1145{
1146	qc->dma_dir = DMA_NONE;
1147	qc->__sg = NULL;
1148	qc->flags = 0;
1149	qc->cursect = qc->cursg = qc->cursg_ofs = 0;
1150	qc->nsect = 0;
1151	qc->nbytes = qc->curbytes = 0;
1152	qc->n_elem = 0;
1153	qc->err_mask = 0;
1154
1155	ata_tf_init(qc->dev, &qc->tf);
1156
1157	/* init result_tf such that it indicates normal completion */
1158	qc->result_tf.command = ATA_DRDY;
1159	qc->result_tf.feature = 0;
1160}
1161
1162/**
1163 *	ata_irq_ack - Acknowledge a device interrupt.
1164 *	@ap: Port on which interrupts are enabled.
1165 *
1166 *	Wait up to 10 ms for legacy IDE device to become idle (BUSY
1167 *	or BUSY+DRQ clear).  Obtain dma status and port status from
1168 *	device.  Clear the interrupt.  Return port status.
1169 *
1170 *	LOCKING:
1171 */
1172
1173static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
1174{
1175	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
1176	u8 host_stat, post_stat, status;
1177
1178	status = ata_busy_wait(ap, bits, 1000);
1179	if (status & bits)
1180		if (ata_msg_err(ap))
1181			printk(KERN_ERR "abnormal status 0x%X\n", status);
1182
1183	/* get controller status; clear intr, err bits */
1184	if (ap->flags & ATA_FLAG_MMIO) {
1185		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
1186		host_stat = readb(mmio + ATA_DMA_STATUS);
1187		writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
1188		       mmio + ATA_DMA_STATUS);
1189
1190		post_stat = readb(mmio + ATA_DMA_STATUS);
1191	} else {
1192		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
1193		outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
1194		     ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
1195
1196		post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
1197	}
1198
1199	if (ata_msg_intr(ap))
1200		printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
1201			__FUNCTION__,
1202			host_stat, post_stat, status);
1203
1204	return status;
1205}
1206
1207static inline int ata_try_flush_cache(const struct ata_device *dev)
1208{
1209	return ata_id_wcache_enabled(dev->id) ||
1210	       ata_id_has_flush(dev->id) ||
1211	       ata_id_has_flush_ext(dev->id);
1212}
1213
1214static inline unsigned int ac_err_mask(u8 status)
1215{
1216	if (status & (ATA_BUSY | ATA_DRQ))
1217		return AC_ERR_HSM;
1218	if (status & (ATA_ERR | ATA_DF))
1219		return AC_ERR_DEV;
1220	return 0;
1221}
1222
1223static inline unsigned int __ac_err_mask(u8 status)
1224{
1225	unsigned int mask = ac_err_mask(status);
1226	if (mask == 0)
1227		return AC_ERR_OTHER;
1228	return mask;
1229}
1230
1231static inline int ata_pad_alloc(struct ata_port *ap, struct device *dev)
1232{
1233	ap->pad_dma = 0;
1234	ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ,
1235				     &ap->pad_dma, GFP_KERNEL);
1236	return (ap->pad == NULL) ? -ENOMEM : 0;
1237}
1238
1239static inline void ata_pad_free(struct ata_port *ap, struct device *dev)
1240{
1241	dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
1242}
1243
1244static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
1245{
1246	return (struct ata_port *) &host->hostdata[0];
1247}
1248
1249#endif /* __LINUX_LIBATA_H__ */
1250