libata.h revision 02670bf379267f55a43aa57f6895689697e90eb3
1/*
2 *  Copyright 2003-2005 Red Hat, Inc.  All rights reserved.
3 *  Copyright 2003-2005 Jeff Garzik
4 *
5 *
6 *  This program is free software; you can redistribute it and/or modify
7 *  it under the terms of the GNU General Public License as published by
8 *  the Free Software Foundation; either version 2, or (at your option)
9 *  any later version.
10 *
11 *  This program is distributed in the hope that it will be useful,
12 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 *  GNU General Public License for more details.
15 *
16 *  You should have received a copy of the GNU General Public License
17 *  along with this program; see the file COPYING.  If not, write to
18 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19 *
20 *
21 *  libata documentation is available via 'make {ps|pdf}docs',
22 *  as Documentation/DocBook/libata.*
23 *
24 */
25
26#ifndef __LINUX_LIBATA_H__
27#define __LINUX_LIBATA_H__
28
29#include <linux/delay.h>
30#include <linux/interrupt.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <asm/scatterlist.h>
34#include <asm/io.h>
35#include <linux/ata.h>
36#include <linux/workqueue.h>
37#include <scsi/scsi_host.h>
38
39/*
40 * compile-time options: to be removed as soon as all the drivers are
41 * converted to the new debugging mechanism
42 */
43#undef ATA_DEBUG		/* debugging output */
44#undef ATA_VERBOSE_DEBUG	/* yet more debugging output */
45#undef ATA_IRQ_TRAP		/* define to ack screaming irqs */
46#undef ATA_NDEBUG		/* define to disable quick runtime checks */
47#undef ATA_ENABLE_PATA		/* define to enable PATA support in some
48				 * low-level drivers */
49
50
51/* note: prints function name for you */
52#ifdef ATA_DEBUG
53#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
54#ifdef ATA_VERBOSE_DEBUG
55#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
56#else
57#define VPRINTK(fmt, args...)
58#endif	/* ATA_VERBOSE_DEBUG */
59#else
60#define DPRINTK(fmt, args...)
61#define VPRINTK(fmt, args...)
62#endif	/* ATA_DEBUG */
63
/* Conditional debug printk gated on the port's DEBUGMSG flag.
 * NOTE: relies on a variable named 'ap' being in scope at the call site.
 * Wrapped in do { } while (0) so the macro is a single statement and is
 * safe inside unbraced if/else bodies (the bare 'if' form would bind a
 * following 'else' to the macro's hidden 'if').
 */
#define BPRINTK(fmt, args...) do { \
	if (ap->flags & ATA_FLAG_DEBUGMSG) \
		printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args); \
} while (0)
65
66/* NEW: debug levels */
67#define HAVE_LIBATA_MSG 1
68
/* Bit classes for ata_port->msg_enable; tested via the ata_msg_*()
 * macros below and initialized through ata_msg_init().
 */
enum {
	ATA_MSG_DRV	= 0x0001,
	ATA_MSG_INFO	= 0x0002,
	ATA_MSG_PROBE	= 0x0004,
	ATA_MSG_WARN	= 0x0008,
	ATA_MSG_MALLOC	= 0x0010,
	ATA_MSG_CTL	= 0x0020,
	ATA_MSG_INTR	= 0x0040,
	ATA_MSG_ERR	= 0x0080,
};
79
80#define ata_msg_drv(p)    ((p)->msg_enable & ATA_MSG_DRV)
81#define ata_msg_info(p)   ((p)->msg_enable & ATA_MSG_INFO)
82#define ata_msg_probe(p)  ((p)->msg_enable & ATA_MSG_PROBE)
83#define ata_msg_warn(p)   ((p)->msg_enable & ATA_MSG_WARN)
84#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
85#define ata_msg_ctl(p)    ((p)->msg_enable & ATA_MSG_CTL)
86#define ata_msg_intr(p)   ((p)->msg_enable & ATA_MSG_INTR)
87#define ata_msg_err(p)    ((p)->msg_enable & ATA_MSG_ERR)
88
89static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
90{
91	if (dval < 0 || dval >= (sizeof(u32) * 8))
92		return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
93	if (!dval)
94		return 0;
95	return (1 << dval) - 1;
96}
97
98/* defines only for the constants which don't work well as enums */
99#define ATA_TAG_POISON		0xfafbfcfdU
100
101/* move to PCI layer? */
102static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
103{
104	return &pdev->dev;
105}
106
107enum {
108	/* various global constants */
109	LIBATA_MAX_PRD		= ATA_MAX_PRD / 2,
110	ATA_MAX_PORTS		= 8,
111	ATA_DEF_QUEUE		= 1,
112	/* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
113	ATA_MAX_QUEUE		= 32,
114	ATA_TAG_INTERNAL	= ATA_MAX_QUEUE - 1,
115	ATA_MAX_SECTORS		= 200,	/* FIXME */
116	ATA_MAX_SECTORS_LBA48	= 65535,
117	ATA_MAX_BUS		= 2,
118	ATA_DEF_BUSY_WAIT	= 10000,
119	ATA_SHORT_PAUSE		= (HZ >> 6) + 1,
120
121	ATA_SHT_EMULATED	= 1,
122	ATA_SHT_CMD_PER_LUN	= 1,
123	ATA_SHT_THIS_ID		= -1,
124	ATA_SHT_USE_CLUSTERING	= 1,
125
126	/* struct ata_device stuff */
127	ATA_DFLAG_LBA		= (1 << 0), /* device supports LBA */
128	ATA_DFLAG_LBA48		= (1 << 1), /* device supports LBA48 */
129	ATA_DFLAG_CDB_INTR	= (1 << 2), /* device asserts INTRQ when ready for CDB */
130	ATA_DFLAG_NCQ		= (1 << 3), /* device supports NCQ */
131	ATA_DFLAG_CFG_MASK	= (1 << 8) - 1,
132
133	ATA_DFLAG_PIO		= (1 << 8), /* device currently in PIO mode */
134	ATA_DFLAG_SUSPENDED	= (1 << 9), /* device suspended */
135	ATA_DFLAG_INIT_MASK	= (1 << 16) - 1,
136
137	ATA_DFLAG_DETACH	= (1 << 16),
138	ATA_DFLAG_DETACHED	= (1 << 17),
139
140	ATA_DEV_UNKNOWN		= 0,	/* unknown device */
141	ATA_DEV_ATA		= 1,	/* ATA device */
142	ATA_DEV_ATA_UNSUP	= 2,	/* ATA device (unsupported) */
143	ATA_DEV_ATAPI		= 3,	/* ATAPI device */
144	ATA_DEV_ATAPI_UNSUP	= 4,	/* ATAPI device (unsupported) */
145	ATA_DEV_NONE		= 5,	/* no device */
146
147	/* struct ata_port flags */
148	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
149					    /* (doesn't imply presence) */
150	ATA_FLAG_SATA		= (1 << 1),
151	ATA_FLAG_NO_LEGACY	= (1 << 2), /* no legacy mode check */
152	ATA_FLAG_MMIO		= (1 << 3), /* use MMIO, not PIO */
153	ATA_FLAG_SRST		= (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
154	ATA_FLAG_SATA_RESET	= (1 << 5), /* (obsolete) use COMRESET */
155	ATA_FLAG_NO_ATAPI	= (1 << 6), /* No ATAPI support */
156	ATA_FLAG_PIO_DMA	= (1 << 7), /* PIO cmds via DMA */
157	ATA_FLAG_PIO_LBA48	= (1 << 8), /* Host DMA engine is LBA28 only */
158	ATA_FLAG_PIO_POLLING	= (1 << 9), /* use polling PIO if LLD
159					     * doesn't handle PIO interrupts */
160	ATA_FLAG_NCQ		= (1 << 10), /* host supports NCQ */
161	ATA_FLAG_HRST_TO_RESUME	= (1 << 11), /* hardreset to resume phy */
162	ATA_FLAG_SKIP_D2H_BSY	= (1 << 12), /* can't wait for the first D2H
163					      * Register FIS clearing BSY */
164	ATA_FLAG_DEBUGMSG	= (1 << 13),
165
166	/* The following flag belongs to ap->pflags but is kept in
167	 * ap->flags because it's referenced in many LLDs and will be
168	 * removed in not-too-distant future.
169	 */
170	ATA_FLAG_DISABLED	= (1 << 23), /* port is disabled, ignore it */
171
172	/* bits 24:31 of ap->flags are reserved for LLD specific flags */
173
174	/* struct ata_port pflags */
175	ATA_PFLAG_EH_PENDING	= (1 << 0), /* EH pending */
176	ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */
177	ATA_PFLAG_FROZEN	= (1 << 2), /* port is frozen */
178	ATA_PFLAG_RECOVERED	= (1 << 3), /* recovery action performed */
179	ATA_PFLAG_LOADING	= (1 << 4), /* boot/loading probe */
180	ATA_PFLAG_UNLOADING	= (1 << 5), /* module is unloading */
181	ATA_PFLAG_SCSI_HOTPLUG	= (1 << 6), /* SCSI hotplug scheduled */
182
183	ATA_PFLAG_FLUSH_PORT_TASK = (1 << 16), /* flush port task */
184	ATA_PFLAG_SUSPENDED	= (1 << 17), /* port is suspended (power) */
185
186	/* struct ata_queued_cmd flags */
187	ATA_QCFLAG_ACTIVE	= (1 << 0), /* cmd not yet ack'd to scsi lyer */
188	ATA_QCFLAG_SG		= (1 << 1), /* have s/g table? */
189	ATA_QCFLAG_SINGLE	= (1 << 2), /* no s/g, just a single buffer */
190	ATA_QCFLAG_DMAMAP	= ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
191	ATA_QCFLAG_IO		= (1 << 3), /* standard IO command */
192	ATA_QCFLAG_RESULT_TF	= (1 << 4), /* result TF requested */
193
194	ATA_QCFLAG_FAILED	= (1 << 16), /* cmd failed and is owned by EH */
195	ATA_QCFLAG_SENSE_VALID	= (1 << 17), /* sense data valid */
196	ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
197
198	/* host set flags */
199	ATA_HOST_SIMPLEX	= (1 << 0),	/* Host is simplex, one DMA channel per host_set only */
200
201	/* various lengths of time */
202	ATA_TMOUT_BOOT		= 30 * HZ,	/* heuristic */
203	ATA_TMOUT_BOOT_QUICK	= 7 * HZ,	/* heuristic */
204	ATA_TMOUT_INTERNAL	= 30 * HZ,
205	ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,
206
207	/* ATA bus states */
208	BUS_UNKNOWN		= 0,
209	BUS_DMA			= 1,
210	BUS_IDLE		= 2,
211	BUS_NOINTR		= 3,
212	BUS_NODATA		= 4,
213	BUS_TIMER		= 5,
214	BUS_PIO			= 6,
215	BUS_EDD			= 7,
216	BUS_IDENTIFY		= 8,
217	BUS_PACKET		= 9,
218
219	/* SATA port states */
220	PORT_UNKNOWN		= 0,
221	PORT_ENABLED		= 1,
222	PORT_DISABLED		= 2,
223
224	/* encoding various smaller bitmaps into a single
225	 * unsigned int bitmap
226	 */
227	ATA_BITS_PIO		= 5,
228	ATA_BITS_MWDMA		= 3,
229	ATA_BITS_UDMA		= 8,
230
231	ATA_SHIFT_PIO		= 0,
232	ATA_SHIFT_MWDMA		= ATA_SHIFT_PIO + ATA_BITS_PIO,
233	ATA_SHIFT_UDMA		= ATA_SHIFT_MWDMA + ATA_BITS_MWDMA,
234
235	ATA_MASK_PIO		= ((1 << ATA_BITS_PIO) - 1) << ATA_SHIFT_PIO,
236	ATA_MASK_MWDMA		= ((1 << ATA_BITS_MWDMA) - 1) << ATA_SHIFT_MWDMA,
237	ATA_MASK_UDMA		= ((1 << ATA_BITS_UDMA) - 1) << ATA_SHIFT_UDMA,
238
239	/* size of buffer to pad xfers ending on unaligned boundaries */
240	ATA_DMA_PAD_SZ		= 4,
241	ATA_DMA_PAD_BUF_SZ	= ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,
242
243	/* masks for port functions */
244	ATA_PORT_PRIMARY	= (1 << 0),
245	ATA_PORT_SECONDARY	= (1 << 1),
246
247	/* ering size */
248	ATA_ERING_SIZE		= 32,
249
250	/* desc_len for ata_eh_info and context */
251	ATA_EH_DESC_LEN		= 80,
252
253	/* reset / recovery action types */
254	ATA_EH_REVALIDATE	= (1 << 0),
255	ATA_EH_SOFTRESET	= (1 << 1),
256	ATA_EH_HARDRESET	= (1 << 2),
257	ATA_EH_SUSPEND		= (1 << 3),
258	ATA_EH_RESUME		= (1 << 4),
259	ATA_EH_PM_FREEZE	= (1 << 5),
260
261	ATA_EH_RESET_MASK	= ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
262	ATA_EH_PERDEV_MASK	= ATA_EH_REVALIDATE | ATA_EH_SUSPEND |
263				  ATA_EH_RESUME | ATA_EH_PM_FREEZE,
264
265	/* ata_eh_info->flags */
266	ATA_EHI_HOTPLUGGED	= (1 << 0),  /* could have been hotplugged */
267	ATA_EHI_RESUME_LINK	= (1 << 1),  /* need to resume link */
268	ATA_EHI_NO_AUTOPSY	= (1 << 2),  /* no autopsy */
269	ATA_EHI_QUIET		= (1 << 3),  /* be quiet */
270
271	ATA_EHI_DID_RESET	= (1 << 16), /* already reset this port */
272
273	/* max repeat if error condition is still set after ->error_handler */
274	ATA_EH_MAX_REPEAT	= 5,
275
276	/* how hard are we gonna try to probe/recover devices */
277	ATA_PROBE_MAX_TRIES	= 3,
278	ATA_EH_RESET_TRIES	= 3,
279	ATA_EH_DEV_TRIES	= 3,
280
281	/* Drive spinup time (time from power-on to the first D2H FIS)
282	 * in msecs - 8s currently.  Failing to get ready in this time
283	 * isn't critical.  It will result in reset failure for
284	 * controllers which can't wait for the first D2H FIS.  libata
285	 * will retry, so it just has to be long enough to spin up
286	 * most devices.
287	 */
288	ATA_SPINUP_WAIT		= 8000,
289};
290
/* PIO/ATAPI host state machine states, stored in
 * ata_port->hsm_task_state and advanced by ata_hsm_move().
 */
enum hsm_task_states {
	HSM_ST_UNKNOWN,		/* state unknown */
	HSM_ST_IDLE,		/* no command in progress */
	HSM_ST,			/* (waiting the device to) transfer data */
	HSM_ST_LAST,		/* (waiting the device to) complete command */
	HSM_ST_ERR,		/* error */
	HSM_ST_FIRST,		/* (waiting the device to)
				   write CDB or first data block */
};
300
/* Bits for ata_queued_cmd->err_mask describing why a command failed. */
enum ata_completion_errors {
	AC_ERR_DEV		= (1 << 0), /* device reported error */
	AC_ERR_HSM		= (1 << 1), /* host state machine violation */
	AC_ERR_TIMEOUT		= (1 << 2), /* timeout */
	AC_ERR_MEDIA		= (1 << 3), /* media error */
	AC_ERR_ATA_BUS		= (1 << 4), /* ATA bus error */
	AC_ERR_HOST_BUS		= (1 << 5), /* host bus error */
	AC_ERR_SYSTEM		= (1 << 6), /* system error */
	AC_ERR_INVALID		= (1 << 7), /* invalid argument */
	AC_ERR_OTHER		= (1 << 8), /* unknown */
};
312
313/* forward declarations */
314struct scsi_device;
315struct ata_port_operations;
316struct ata_port;
317struct ata_queued_cmd;
318
319/* typedefs */
320typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
321typedef int (*ata_prereset_fn_t)(struct ata_port *ap);
322typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes);
323typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes);
324
325struct ata_ioports {
326	unsigned long		cmd_addr;
327	unsigned long		data_addr;
328	unsigned long		error_addr;
329	unsigned long		feature_addr;
330	unsigned long		nsect_addr;
331	unsigned long		lbal_addr;
332	unsigned long		lbam_addr;
333	unsigned long		lbah_addr;
334	unsigned long		device_addr;
335	unsigned long		status_addr;
336	unsigned long		command_addr;
337	unsigned long		altstatus_addr;
338	unsigned long		ctl_addr;
339	unsigned long		bmdma_addr;
340	unsigned long		scr_addr;
341};
342
343struct ata_probe_ent {
344	struct list_head	node;
345	struct device 		*dev;
346	const struct ata_port_operations *port_ops;
347	struct scsi_host_template *sht;
348	struct ata_ioports	port[ATA_MAX_PORTS];
349	unsigned int		n_ports;
350	unsigned int		hard_port_no;
351	unsigned int		pio_mask;
352	unsigned int		mwdma_mask;
353	unsigned int		udma_mask;
354	unsigned int		legacy_mode;
355	unsigned long		irq;
356	unsigned int		irq_flags;
357	unsigned long		host_flags;
358	unsigned long		host_set_flags;
359	void __iomem		*mmio_base;
360	void			*private_data;
361};
362
/* One host controller instance: shared lock, controller-wide resources
 * and the ports hanging off it.
 */
struct ata_host_set {
	spinlock_t		lock;
	struct device 		*dev;
	unsigned long		irq;
	void __iomem		*mmio_base;
	unsigned int		n_ports;
	void			*private_data;
	const struct ata_port_operations *ops;
	unsigned long		flags;
	int			simplex_claimed;	/* Keep separate in case we
							   ever need to do this locked */
	struct ata_host_set	*next;		/* for legacy mode */
	struct ata_port		*ports[0];	/* variable-length tail: n_ports entries */
};
377
378struct ata_queued_cmd {
379	struct ata_port		*ap;
380	struct ata_device	*dev;
381
382	struct scsi_cmnd	*scsicmd;
383	void			(*scsidone)(struct scsi_cmnd *);
384
385	struct ata_taskfile	tf;
386	u8			cdb[ATAPI_CDB_LEN];
387
388	unsigned long		flags;		/* ATA_QCFLAG_xxx */
389	unsigned int		tag;
390	unsigned int		n_elem;
391	unsigned int		orig_n_elem;
392
393	int			dma_dir;
394
395	unsigned int		pad_len;
396
397	unsigned int		nsect;
398	unsigned int		cursect;
399
400	unsigned int		nbytes;
401	unsigned int		curbytes;
402
403	unsigned int		cursg;
404	unsigned int		cursg_ofs;
405
406	struct scatterlist	sgent;
407	struct scatterlist	pad_sgent;
408	void			*buf_virt;
409
410	/* DO NOT iterate over __sg manually, use ata_for_each_sg() */
411	struct scatterlist	*__sg;
412
413	unsigned int		err_mask;
414	struct ata_taskfile	result_tf;
415	ata_qc_cb_t		complete_fn;
416
417	void			*private_data;
418};
419
420struct ata_host_stats {
421	unsigned long		unhandled_irq;
422	unsigned long		idle_irq;
423	unsigned long		rw_reqbuf;
424};
425
426struct ata_ering_entry {
427	int			is_io;
428	unsigned int		err_mask;
429	u64			timestamp;
430};
431
432struct ata_ering {
433	int			cursor;
434	struct ata_ering_entry	ring[ATA_ERING_SIZE];
435};
436
437struct ata_device {
438	struct ata_port		*ap;
439	unsigned int		devno;		/* 0 or 1 */
440	unsigned long		flags;		/* ATA_DFLAG_xxx */
441	struct scsi_device	*sdev;		/* attached SCSI device */
442	/* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */
443	u64			n_sectors;	/* size of device, if ATA */
444	unsigned int		class;		/* ATA_DEV_xxx */
445	u16			id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
446	u8			pio_mode;
447	u8			dma_mode;
448	u8			xfer_mode;
449	unsigned int		xfer_shift;	/* ATA_SHIFT_xxx */
450
451	unsigned int		multi_count;	/* sectors count for
452						   READ/WRITE MULTIPLE */
453	unsigned int		max_sectors;	/* per-device max sectors */
454	unsigned int		cdb_len;
455
456	/* per-dev xfer mask */
457	unsigned int		pio_mask;
458	unsigned int		mwdma_mask;
459	unsigned int		udma_mask;
460
461	/* for CHS addressing */
462	u16			cylinders;	/* Number of cylinders */
463	u16			heads;		/* Number of heads */
464	u16			sectors;	/* Number of sectors per track */
465
466	/* error history */
467	struct ata_ering	ering;
468};
469
/* Offset into struct ata_device.  Fields above it are maintained
 * across device init.  Fields below are zeroed.
 */
#define ATA_DEVICE_CLEAR_OFFSET		offsetof(struct ata_device, n_sectors)
474
475struct ata_eh_info {
476	struct ata_device	*dev;		/* offending device */
477	u32			serror;		/* SError from LLDD */
478	unsigned int		err_mask;	/* port-wide err_mask */
479	unsigned int		action;		/* ATA_EH_* action mask */
480	unsigned int		dev_action[ATA_MAX_DEVICES]; /* dev EH action */
481	unsigned int		flags;		/* ATA_EHI_* flags */
482
483	unsigned long		hotplug_timestamp;
484	unsigned int		probe_mask;
485
486	char			desc[ATA_EH_DESC_LEN];
487	int			desc_len;
488};
489
490struct ata_eh_context {
491	struct ata_eh_info	i;
492	int			tries[ATA_MAX_DEVICES];
493	unsigned int		classes[ATA_MAX_DEVICES];
494	unsigned int		did_probe_mask;
495};
496
497struct ata_port {
498	struct Scsi_Host	*host;	/* our co-allocated scsi host */
499	const struct ata_port_operations *ops;
500	spinlock_t		*lock;
501	unsigned long		flags;	/* ATA_FLAG_xxx */
502	unsigned int		pflags; /* ATA_PFLAG_xxx */
503	unsigned int		id;	/* unique id req'd by scsi midlyr */
504	unsigned int		port_no; /* unique port #; from zero */
505	unsigned int		hard_port_no;	/* hardware port #; from zero */
506
507	struct ata_prd		*prd;	 /* our SG list */
508	dma_addr_t		prd_dma; /* and its DMA mapping */
509
510	void			*pad;	/* array of DMA pad buffers */
511	dma_addr_t		pad_dma;
512
513	struct ata_ioports	ioaddr;	/* ATA cmd/ctl/dma register blocks */
514
515	u8			ctl;	/* cache of ATA control register */
516	u8			last_ctl;	/* Cache last written value */
517	unsigned int		pio_mask;
518	unsigned int		mwdma_mask;
519	unsigned int		udma_mask;
520	unsigned int		cbl;	/* cable type; ATA_CBL_xxx */
521	unsigned int		hw_sata_spd_limit;
522	unsigned int		sata_spd_limit;	/* SATA PHY speed limit */
523
524	/* record runtime error info, protected by host_set lock */
525	struct ata_eh_info	eh_info;
526	/* EH context owned by EH */
527	struct ata_eh_context	eh_context;
528
529	struct ata_device	device[ATA_MAX_DEVICES];
530
531	struct ata_queued_cmd	qcmd[ATA_MAX_QUEUE];
532	unsigned long		qc_allocated;
533	unsigned int		qc_active;
534
535	unsigned int		active_tag;
536	u32			sactive;
537
538	struct ata_host_stats	stats;
539	struct ata_host_set	*host_set;
540	struct device 		*dev;
541
542	struct work_struct	port_task;
543	struct work_struct	hotplug_task;
544	struct work_struct	scsi_rescan_task;
545
546	unsigned int		hsm_task_state;
547
548	u32			msg_enable;
549	struct list_head	eh_done_q;
550	wait_queue_head_t	eh_wait_q;
551
552	void			*private_data;
553
554	u8			sector_buf[ATA_SECT_SIZE]; /* owned by EH */
555};
556
557struct ata_port_operations {
558	void (*port_disable) (struct ata_port *);
559
560	void (*dev_config) (struct ata_port *, struct ata_device *);
561
562	void (*set_piomode) (struct ata_port *, struct ata_device *);
563	void (*set_dmamode) (struct ata_port *, struct ata_device *);
564	unsigned long (*mode_filter) (const struct ata_port *, struct ata_device *, unsigned long);
565
566	void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
567	void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
568
569	void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf);
570	u8   (*check_status)(struct ata_port *ap);
571	u8   (*check_altstatus)(struct ata_port *ap);
572	void (*dev_select)(struct ata_port *ap, unsigned int device);
573
574	void (*phy_reset) (struct ata_port *ap); /* obsolete */
575	void (*set_mode) (struct ata_port *ap);
576
577	void (*post_set_mode) (struct ata_port *ap);
578
579	int (*check_atapi_dma) (struct ata_queued_cmd *qc);
580
581	void (*bmdma_setup) (struct ata_queued_cmd *qc);
582	void (*bmdma_start) (struct ata_queued_cmd *qc);
583
584	void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
585
586	void (*qc_prep) (struct ata_queued_cmd *qc);
587	unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
588
589	/* Error handlers.  ->error_handler overrides ->eng_timeout and
590	 * indicates that new-style EH is in place.
591	 */
592	void (*eng_timeout) (struct ata_port *ap); /* obsolete */
593
594	void (*freeze) (struct ata_port *ap);
595	void (*thaw) (struct ata_port *ap);
596	void (*error_handler) (struct ata_port *ap);
597	void (*post_internal_cmd) (struct ata_queued_cmd *qc);
598
599	irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
600	void (*irq_clear) (struct ata_port *);
601
602	u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg);
603	void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
604			   u32 val);
605
606	int (*port_start) (struct ata_port *ap);
607	void (*port_stop) (struct ata_port *ap);
608
609	void (*host_stop) (struct ata_host_set *host_set);
610
611	void (*bmdma_stop) (struct ata_queued_cmd *qc);
612	u8   (*bmdma_status) (struct ata_port *ap);
613};
614
615struct ata_port_info {
616	struct scsi_host_template	*sht;
617	unsigned long		host_flags;
618	unsigned long		pio_mask;
619	unsigned long		mwdma_mask;
620	unsigned long		udma_mask;
621	const struct ata_port_operations *port_ops;
622	void 			*private_data;
623};
624
625struct ata_timing {
626	unsigned short mode;		/* ATA mode */
627	unsigned short setup;		/* t1 */
628	unsigned short act8b;		/* t2 for 8-bit I/O */
629	unsigned short rec8b;		/* t2i for 8-bit I/O */
630	unsigned short cyc8b;		/* t0 for 8-bit I/O */
631	unsigned short active;		/* t2 or tD */
632	unsigned short recover;		/* t2i or tK */
633	unsigned short cycle;		/* t0 */
634	unsigned short udma;		/* t2CYCTYP/2 */
635};
636
637#define FIT(v,vmin,vmax)	max_t(short,min_t(short,v,vmax),vmin)
638
639extern const unsigned long sata_deb_timing_normal[];
640extern const unsigned long sata_deb_timing_hotplug[];
641extern const unsigned long sata_deb_timing_long[];
642
643static inline const unsigned long *
644sata_ehc_deb_timing(struct ata_eh_context *ehc)
645{
646	if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
647		return sata_deb_timing_hotplug;
648	else
649		return sata_deb_timing_normal;
650}
651
652extern void ata_port_probe(struct ata_port *);
653extern void __sata_phy_reset(struct ata_port *ap);
654extern void sata_phy_reset(struct ata_port *ap);
655extern void ata_bus_reset(struct ata_port *ap);
656extern int sata_set_spd(struct ata_port *ap);
657extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param);
658extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param);
659extern int ata_std_prereset(struct ata_port *ap);
660extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
661extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
662extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
663extern int ata_dev_revalidate(struct ata_device *dev, int post_reset);
664extern void ata_port_disable(struct ata_port *);
665extern void ata_std_ports(struct ata_ioports *ioaddr);
666#ifdef CONFIG_PCI
667extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
668			     unsigned int n_ports);
669extern void ata_pci_remove_one (struct pci_dev *pdev);
670extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state);
671extern int ata_pci_device_resume(struct pci_dev *pdev);
672extern int ata_pci_clear_simplex(struct pci_dev *pdev);
673#endif /* CONFIG_PCI */
674extern int ata_device_add(const struct ata_probe_ent *ent);
675extern void ata_port_detach(struct ata_port *ap);
676extern void ata_host_set_remove(struct ata_host_set *host_set);
677extern int ata_scsi_detect(struct scsi_host_template *sht);
678extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
679extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
680extern int ata_scsi_release(struct Scsi_Host *host);
681extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
682extern int sata_scr_valid(struct ata_port *ap);
683extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
684extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
685extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
686extern int ata_port_online(struct ata_port *ap);
687extern int ata_port_offline(struct ata_port *ap);
688extern int ata_scsi_device_resume(struct scsi_device *);
689extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
690extern int ata_device_resume(struct ata_device *);
691extern int ata_device_suspend(struct ata_device *, pm_message_t state);
692extern int ata_ratelimit(void);
693extern unsigned int ata_busy_sleep(struct ata_port *ap,
694				   unsigned long timeout_pat,
695				   unsigned long timeout);
696extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
697				void *data, unsigned long delay);
698extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
699			     unsigned long interval_msec,
700			     unsigned long timeout_msec);
701
702/*
703 * Default driver ops implementations
704 */
705extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
706extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
707extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp);
708extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
709extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device);
710extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
711extern u8 ata_check_status(struct ata_port *ap);
712extern u8 ata_altstatus(struct ata_port *ap);
713extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
714extern int ata_port_start (struct ata_port *ap);
715extern void ata_port_stop (struct ata_port *ap);
716extern void ata_host_stop (struct ata_host_set *host_set);
717extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
718extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
719			       unsigned int buflen, int write_data);
720extern void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
721			      unsigned int buflen, int write_data);
722extern void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
723			      unsigned int buflen, int write_data);
724extern void ata_qc_prep(struct ata_queued_cmd *qc);
725extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
726extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
727extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
728		unsigned int buflen);
729extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
730		 unsigned int n_elem);
731extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
732extern void ata_id_string(const u16 *id, unsigned char *s,
733			  unsigned int ofs, unsigned int len);
734extern void ata_id_c_string(const u16 *id, unsigned char *s,
735			    unsigned int ofs, unsigned int len);
736extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
737extern void ata_bmdma_start (struct ata_queued_cmd *qc);
738extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
739extern u8   ata_bmdma_status(struct ata_port *ap);
740extern void ata_bmdma_irq_clear(struct ata_port *ap);
741extern void ata_bmdma_freeze(struct ata_port *ap);
742extern void ata_bmdma_thaw(struct ata_port *ap);
743extern void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
744			       ata_reset_fn_t softreset,
745			       ata_reset_fn_t hardreset,
746			       ata_postreset_fn_t postreset);
747extern void ata_bmdma_error_handler(struct ata_port *ap);
748extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
749extern int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
750			u8 status, int in_wq);
751extern void ata_qc_complete(struct ata_queued_cmd *qc);
752extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
753				    void (*finish_qc)(struct ata_queued_cmd *));
754extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
755			      void (*done)(struct scsi_cmnd *));
756extern int ata_std_bios_param(struct scsi_device *sdev,
757			      struct block_device *bdev,
758			      sector_t capacity, int geom[]);
759extern int ata_scsi_slave_config(struct scsi_device *sdev);
760extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
761extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
762				       int queue_depth);
763extern struct ata_device *ata_dev_pair(struct ata_device *adev);
764
765/*
766 * Timing helpers
767 */
768
769extern unsigned int ata_pio_need_iordy(const struct ata_device *);
770extern int ata_timing_compute(struct ata_device *, unsigned short,
771			      struct ata_timing *, int, int);
772extern void ata_timing_merge(const struct ata_timing *,
773			     const struct ata_timing *, struct ata_timing *,
774			     unsigned int);
775
776enum {
777	ATA_TIMING_SETUP	= (1 << 0),
778	ATA_TIMING_ACT8B	= (1 << 1),
779	ATA_TIMING_REC8B	= (1 << 2),
780	ATA_TIMING_CYC8B	= (1 << 3),
781	ATA_TIMING_8BIT		= ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
782				  ATA_TIMING_CYC8B,
783	ATA_TIMING_ACTIVE	= (1 << 4),
784	ATA_TIMING_RECOVER	= (1 << 5),
785	ATA_TIMING_CYCLE	= (1 << 6),
786	ATA_TIMING_UDMA		= (1 << 7),
787	ATA_TIMING_ALL		= ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
788				  ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
789				  ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
790				  ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
791};
792
793
794#ifdef CONFIG_PCI
795struct pci_bits {
796	unsigned int		reg;	/* PCI config register to read */
797	unsigned int		width;	/* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
798	unsigned long		mask;
799	unsigned long		val;
800};
801
802extern void ata_pci_host_stop (struct ata_host_set *host_set);
803extern struct ata_probe_ent *
804ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
805extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
806extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
807#endif /* CONFIG_PCI */
808
809/*
810 * EH
811 */
812extern void ata_eng_timeout(struct ata_port *ap);
813
814extern void ata_port_schedule_eh(struct ata_port *ap);
815extern int ata_port_abort(struct ata_port *ap);
816extern int ata_port_freeze(struct ata_port *ap);
817
818extern void ata_eh_freeze_port(struct ata_port *ap);
819extern void ata_eh_thaw_port(struct ata_port *ap);
820
821extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
822extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
823
824extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
825		      ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
826		      ata_postreset_fn_t postreset);
827
828/*
829 * printk helpers
830 */
831#define ata_port_printk(ap, lv, fmt, args...) \
832	printk(lv"ata%u: "fmt, (ap)->id , ##args)
833
834#define ata_dev_printk(dev, lv, fmt, args...) \
835	printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)
836
837/*
838 * ata_eh_info helpers
839 */
840#define ata_ehi_push_desc(ehi, fmt, args...) do { \
841	(ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
842				     ATA_EH_DESC_LEN - (ehi)->desc_len, \
843				     fmt , ##args); \
844} while (0)
845
846#define ata_ehi_clear_desc(ehi) do { \
847	(ehi)->desc[0] = '\0'; \
848	(ehi)->desc_len = 0; \
849} while (0)
850
851static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi)
852{
853	if (ehi->flags & ATA_EHI_HOTPLUGGED)
854		return;
855
856	ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK;
857	ehi->hotplug_timestamp = jiffies;
858
859	ehi->action |= ATA_EH_SOFTRESET;
860	ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
861}
862
863static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
864{
865	__ata_ehi_hotplugged(ehi);
866	ehi->err_mask |= AC_ERR_ATA_BUS;
867}
868
869/*
870 * qc helpers
871 */
872static inline int
873ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
874{
875	if (sg == &qc->pad_sgent)
876		return 1;
877	if (qc->pad_len)
878		return 0;
879	if (((sg - qc->__sg) + 1) == qc->n_elem)
880		return 1;
881	return 0;
882}
883
884static inline struct scatterlist *
885ata_qc_first_sg(struct ata_queued_cmd *qc)
886{
887	if (qc->n_elem)
888		return qc->__sg;
889	if (qc->pad_len)
890		return &qc->pad_sgent;
891	return NULL;
892}
893
894static inline struct scatterlist *
895ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
896{
897	if (sg == &qc->pad_sgent)
898		return NULL;
899	if (++sg - qc->__sg < qc->n_elem)
900		return sg;
901	if (qc->pad_len)
902		return &qc->pad_sgent;
903	return NULL;
904}
905
/* Iterate @sg over every scatterlist entry of @qc, pad entry included. */
#define ata_for_each_sg(sg, qc) \
	for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))
908
909static inline unsigned int ata_tag_valid(unsigned int tag)
910{
911	return (tag < ATA_MAX_QUEUE) ? 1 : 0;
912}
913
static inline unsigned int ata_tag_internal(unsigned int tag)
{
	/* The highest tag slot is treated as the internal-command tag. */
	return tag == ATA_MAX_QUEUE - 1;
}
918
919/*
920 * device helpers
921 */
922static inline unsigned int ata_class_enabled(unsigned int class)
923{
924	return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
925}
926
927static inline unsigned int ata_class_disabled(unsigned int class)
928{
929	return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
930}
931
static inline unsigned int ata_class_absent(unsigned int class)
{
	/* Absent == neither an enabled nor a disabled class. */
	return !(ata_class_enabled(class) || ata_class_disabled(class));
}
936
/* True iff @dev's class is an enabled one (ATA or ATAPI). */
static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
{
	return ata_class_enabled(dev->class);
}

/* True iff @dev's class is a disabled (unsupported) one. */
static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
{
	return ata_class_disabled(dev->class);
}

/* True iff @dev's class is neither enabled nor disabled. */
static inline unsigned int ata_dev_absent(const struct ata_device *dev)
{
	return ata_class_absent(dev->class);
}

/* True iff @dev is enabled and not currently suspended. */
static inline unsigned int ata_dev_ready(const struct ata_device *dev)
{
	return ata_dev_enabled(dev) && !(dev->flags & ATA_DFLAG_SUSPENDED);
}
956
957/*
958 * port helpers
959 */
960static inline int ata_port_max_devices(const struct ata_port *ap)
961{
962	if (ap->flags & ATA_FLAG_SLAVE_POSS)
963		return 2;
964	return 1;
965}
966
967
/* Read the status register via the port's check_status method. */
static inline u8 ata_chk_status(struct ata_port *ap)
{
	return ap->ops->check_status(ap);
}
972
973
/**
 *	ata_pause - Flush writes and pause 400 nanoseconds.
 *	@ap: Port to wait for.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline void ata_pause(struct ata_port *ap)
{
	/* Reading the alternate status register flushes posted writes. */
	ata_altstatus(ap);
	ndelay(400);
}
987
988
989/**
990 *	ata_busy_wait - Wait for a port status register
991 *	@ap: Port to wait for.
992 *
993 *	Waits up to max*10 microseconds for the selected bits in the port's
994 *	status register to be cleared.
995 *	Returns final value of status register.
996 *
997 *	LOCKING:
998 *	Inherited from caller.
999 */
1000
1001static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits,
1002			       unsigned int max)
1003{
1004	u8 status;
1005
1006	do {
1007		udelay(10);
1008		status = ata_chk_status(ap);
1009		max--;
1010	} while ((status & bits) && (max > 0));
1011
1012	return status;
1013}
1014
1015
1016/**
1017 *	ata_wait_idle - Wait for a port to be idle.
1018 *	@ap: Port to wait for.
1019 *
1020 *	Waits up to 10ms for port's BUSY and DRQ signals to clear.
1021 *	Returns final value of status register.
1022 *
1023 *	LOCKING:
1024 *	Inherited from caller.
1025 */
1026
1027static inline u8 ata_wait_idle(struct ata_port *ap)
1028{
1029	u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
1030
1031	if (status & (ATA_BUSY | ATA_DRQ)) {
1032		unsigned long l = ap->ioaddr.status_addr;
1033		if (ata_msg_warn(ap))
1034			printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
1035				status, l);
1036	}
1037
1038	return status;
1039}
1040
/* Set nIEN in the staged taskfile's control byte (disables the device IRQ). */
static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
{
	qc->tf.ctl |= ATA_NIEN;
}
1045
1046static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
1047						       unsigned int tag)
1048{
1049	if (likely(ata_tag_valid(tag)))
1050		return &ap->qcmd[tag];
1051	return NULL;
1052}
1053
1054static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
1055						     unsigned int tag)
1056{
1057	struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1058
1059	if (unlikely(!qc) || !ap->ops->error_handler)
1060		return qc;
1061
1062	if ((qc->flags & (ATA_QCFLAG_ACTIVE |
1063			  ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
1064		return qc;
1065
1066	return NULL;
1067}
1068
1069static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
1070{
1071	memset(tf, 0, sizeof(*tf));
1072
1073	tf->ctl = dev->ap->ctl;
1074	if (dev->devno == 0)
1075		tf->device = ATA_DEVICE_OBS;
1076	else
1077		tf->device = ATA_DEVICE_OBS | ATA_DEV1;
1078}
1079
/* Reset a queued command to a pristine state before it is reused. */
static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
{
	/* Clear scatterlist, flags, and all transfer progress counters. */
	qc->__sg = NULL;
	qc->flags = 0;
	qc->cursect = qc->cursg = qc->cursg_ofs = 0;
	qc->nsect = 0;
	qc->nbytes = qc->curbytes = 0;
	qc->err_mask = 0;

	ata_tf_init(qc->dev, &qc->tf);

	/* init result_tf such that it indicates normal completion */
	qc->result_tf.command = ATA_DRDY;
	qc->result_tf.feature = 0;
}
1095
/**
 *	ata_irq_on - Enable interrupts on a port.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Enable interrupts on a legacy IDE device using MMIO or PIO,
 *	wait for idle, clear any pending interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline u8 ata_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 tmp;

	/* Clear nIEN (interrupt disable) and remember the new control value. */
	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	/* Write the device control register via MMIO or port I/O. */
	if (ap->flags & ATA_FLAG_MMIO)
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	else
		outb(ap->ctl, ioaddr->ctl_addr);
	tmp = ata_wait_idle(ap);

	/* Ack any interrupt that was pending before we enabled them. */
	ap->ops->irq_clear(ap);

	return tmp;
}
1125
1126
/**
 *	ata_irq_ack - Acknowledge a device interrupt.
 *	@ap: Port on which interrupts are enabled.
 *	@chk_drq: When non-zero, wait for DRQ to clear as well as BUSY.
 *
 *	Wait up to 10 ms for legacy IDE device to become idle (BUSY
 *	or BUSY+DRQ clear).  Obtain dma status and port status from
 *	device.  Clear the interrupt.  Return port status.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
{
	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
	u8 host_stat, post_stat, status;

	status = ata_busy_wait(ap, bits, 1000);
	if (status & bits)
		if (ata_msg_err(ap))
			printk(KERN_ERR "abnormal status 0x%X\n", status);

	/* get controller status; clear intr, err bits */
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
		host_stat = readb(mmio + ATA_DMA_STATUS);
		/* Writing the INTR/ERR bits back acknowledges them. */
		writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
		       mmio + ATA_DMA_STATUS);

		/* Re-read so the debug message shows the post-ack state. */
		post_stat = readb(mmio + ATA_DMA_STATUS);
	} else {
		/* Same sequence for port-I/O controllers. */
		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
		outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
		     ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);

		post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
	}

	if (ata_msg_intr(ap))
		printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
			__FUNCTION__,
			host_stat, post_stat, status);

	return status;
}
1171
1172static inline int ata_try_flush_cache(const struct ata_device *dev)
1173{
1174	return ata_id_wcache_enabled(dev->id) ||
1175	       ata_id_has_flush(dev->id) ||
1176	       ata_id_has_flush_ext(dev->id);
1177}
1178
1179static inline unsigned int ac_err_mask(u8 status)
1180{
1181	if (status & (ATA_BUSY | ATA_DRQ))
1182		return AC_ERR_HSM;
1183	if (status & (ATA_ERR | ATA_DF))
1184		return AC_ERR_DEV;
1185	return 0;
1186}
1187
1188static inline unsigned int __ac_err_mask(u8 status)
1189{
1190	unsigned int mask = ac_err_mask(status);
1191	if (mask == 0)
1192		return AC_ERR_OTHER;
1193	return mask;
1194}
1195
1196static inline int ata_pad_alloc(struct ata_port *ap, struct device *dev)
1197{
1198	ap->pad_dma = 0;
1199	ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ,
1200				     &ap->pad_dma, GFP_KERNEL);
1201	return (ap->pad == NULL) ? -ENOMEM : 0;
1202}
1203
/* Release the DMA pad buffer allocated by ata_pad_alloc(). */
static inline void ata_pad_free(struct ata_port *ap, struct device *dev)
{
	dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
}
1208
/* The ata_port lives in the SCSI host's hostdata area. */
static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
{
	return (struct ata_port *) &host->hostdata[0];
}
1213
1214#endif /* __LINUX_LIBATA_H__ */
1215