libata.h revision a6b2c5d4754dc539a560fdf0d3fb78a14174394a
1/*
2 *  Copyright 2003-2005 Red Hat, Inc.  All rights reserved.
3 *  Copyright 2003-2005 Jeff Garzik
4 *
5 *
6 *  This program is free software; you can redistribute it and/or modify
7 *  it under the terms of the GNU General Public License as published by
8 *  the Free Software Foundation; either version 2, or (at your option)
9 *  any later version.
10 *
11 *  This program is distributed in the hope that it will be useful,
12 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 *  GNU General Public License for more details.
15 *
16 *  You should have received a copy of the GNU General Public License
17 *  along with this program; see the file COPYING.  If not, write to
18 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19 *
20 *
21 *  libata documentation is available via 'make {ps|pdf}docs',
22 *  as Documentation/DocBook/libata.*
23 *
24 */
25
26#ifndef __LINUX_LIBATA_H__
27#define __LINUX_LIBATA_H__
28
29#include <linux/delay.h>
30#include <linux/interrupt.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <asm/io.h>
34#include <linux/ata.h>
35#include <linux/workqueue.h>
36#include <scsi/scsi_host.h>
37
38/*
39 * compile-time options: to be removed as soon as all the drivers are
40 * converted to the new debugging mechanism
41 */
42#undef ATA_DEBUG		/* debugging output */
43#undef ATA_VERBOSE_DEBUG	/* yet more debugging output */
44#undef ATA_IRQ_TRAP		/* define to ack screaming irqs */
45#undef ATA_NDEBUG		/* define to disable quick runtime checks */
46#undef ATA_ENABLE_PATA		/* define to enable PATA support in some
47				 * low-level drivers */
48
49
50/* note: prints function name for you */
51#ifdef ATA_DEBUG
52#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
53#ifdef ATA_VERBOSE_DEBUG
54#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
55#else
56#define VPRINTK(fmt, args...)
57#endif	/* ATA_VERBOSE_DEBUG */
58#else
59#define DPRINTK(fmt, args...)
60#define VPRINTK(fmt, args...)
61#endif	/* ATA_DEBUG */
62
63#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
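
/*
 * Usage sketch (editor's addition, not from the original header): DPRINTK()
 * and VPRINTK() compile away unless ATA_DEBUG / ATA_VERBOSE_DEBUG are
 * defined above; BPRINTK() is gated at run time on ATA_FLAG_DEBUGMSG and
 * expects a local variable named "ap".  "host_stat" is a hypothetical local.
 *
 *	DPRINTK("ENTER\n");
 *	VPRINTK("host_stat = 0x%X\n", host_stat);
 *	BPRINTK("qc_active = 0x%X\n", ap->qc_active);
 */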
64
65/* NEW: debug levels */
66#define HAVE_LIBATA_MSG 1
67
68enum {
69	ATA_MSG_DRV	= 0x0001,
70	ATA_MSG_INFO	= 0x0002,
71	ATA_MSG_PROBE	= 0x0004,
72	ATA_MSG_WARN	= 0x0008,
73	ATA_MSG_MALLOC	= 0x0010,
74	ATA_MSG_CTL	= 0x0020,
75	ATA_MSG_INTR	= 0x0040,
76	ATA_MSG_ERR	= 0x0080,
77};
78
79#define ata_msg_drv(p)    ((p)->msg_enable & ATA_MSG_DRV)
80#define ata_msg_info(p)   ((p)->msg_enable & ATA_MSG_INFO)
81#define ata_msg_probe(p)  ((p)->msg_enable & ATA_MSG_PROBE)
82#define ata_msg_warn(p)   ((p)->msg_enable & ATA_MSG_WARN)
83#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
84#define ata_msg_ctl(p)    ((p)->msg_enable & ATA_MSG_CTL)
85#define ata_msg_intr(p)   ((p)->msg_enable & ATA_MSG_INTR)
86#define ata_msg_err(p)    ((p)->msg_enable & ATA_MSG_ERR)
87
88static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
89{
90	if (dval < 0 || dval >= (sizeof(u32) * 8))
91		return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
92	if (!dval)
93		return 0;
94	return (1 << dval) - 1;
95}
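
/*
 * Usage sketch (editor's addition, not from the original header): a LLDD
 * can derive ap->msg_enable from a debug module parameter and then gate
 * its messages on the ata_msg_xxx() helpers above.  "mydrv_msg_level" is a
 * hypothetical module parameter, not a libata symbol.
 *
 *	ap->msg_enable = ata_msg_init(mydrv_msg_level,
 *				      ATA_MSG_DRV | ATA_MSG_ERR);
 *
 *	if (ata_msg_probe(ap))
 *		printk(KERN_DEBUG "%s: probing port %u\n",
 *		       __FUNCTION__, ap->port_no);
 */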
96
97/* defines only for the constants which don't work well as enums */
98#define ATA_TAG_POISON		0xfafbfcfdU
99
100/* move to PCI layer? */
101static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
102{
103	return &pdev->dev;
104}
105
106enum {
107	/* various global constants */
108	LIBATA_MAX_PRD		= ATA_MAX_PRD / 2,
109	ATA_MAX_PORTS		= 8,
110	ATA_DEF_QUEUE		= 1,
111	/* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
112	ATA_MAX_QUEUE		= 32,
113	ATA_TAG_INTERNAL	= ATA_MAX_QUEUE - 1,
114	ATA_MAX_SECTORS		= 200,	/* FIXME */
115	ATA_MAX_SECTORS_LBA48	= 65535,
116	ATA_MAX_BUS		= 2,
117	ATA_DEF_BUSY_WAIT	= 10000,
118	ATA_SHORT_PAUSE		= (HZ >> 6) + 1,
119
120	ATA_SHT_EMULATED	= 1,
121	ATA_SHT_CMD_PER_LUN	= 1,
122	ATA_SHT_THIS_ID		= -1,
123	ATA_SHT_USE_CLUSTERING	= 1,
124
125	/* struct ata_device stuff */
126	ATA_DFLAG_LBA		= (1 << 0), /* device supports LBA */
127	ATA_DFLAG_LBA48		= (1 << 1), /* device supports LBA48 */
128	ATA_DFLAG_CDB_INTR	= (1 << 2), /* device asserts INTRQ when ready for CDB */
129	ATA_DFLAG_NCQ		= (1 << 3), /* device supports NCQ */
130	ATA_DFLAG_CFG_MASK	= (1 << 8) - 1,
131
132	ATA_DFLAG_PIO		= (1 << 8), /* device currently in PIO mode */
133
134	ATA_DEV_UNKNOWN		= 0,	/* unknown device */
135	ATA_DEV_ATA		= 1,	/* ATA device */
136	ATA_DEV_ATA_UNSUP	= 2,	/* ATA device (unsupported) */
137	ATA_DEV_ATAPI		= 3,	/* ATAPI device */
138	ATA_DEV_ATAPI_UNSUP	= 4,	/* ATAPI device (unsupported) */
139	ATA_DEV_NONE		= 5,	/* no device */
140
141	/* struct ata_port flags */
142	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
143					    /* (doesn't imply presence) */
144	ATA_FLAG_SATA		= (1 << 1),
145	ATA_FLAG_NO_LEGACY	= (1 << 2), /* no legacy mode check */
146	ATA_FLAG_MMIO		= (1 << 3), /* use MMIO, not PIO */
147	ATA_FLAG_SRST		= (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
148	ATA_FLAG_SATA_RESET	= (1 << 5), /* (obsolete) use COMRESET */
149	ATA_FLAG_NO_ATAPI	= (1 << 6), /* No ATAPI support */
150	ATA_FLAG_PIO_DMA	= (1 << 7), /* PIO cmds via DMA */
151	ATA_FLAG_PIO_LBA48	= (1 << 8), /* Host DMA engine is LBA28 only */
152	ATA_FLAG_IRQ_MASK	= (1 << 9), /* Mask IRQ in PIO xfers */
153	ATA_FLAG_PIO_POLLING	= (1 << 10), /* use polling PIO if LLD
154					      * doesn't handle PIO interrupts */
155	ATA_FLAG_NCQ		= (1 << 11), /* host supports NCQ */
156
157	ATA_FLAG_DEBUGMSG	= (1 << 14),
158	ATA_FLAG_FLUSH_PORT_TASK = (1 << 15), /* flush port task */
159
160	ATA_FLAG_EH_PENDING	= (1 << 16), /* EH pending */
161	ATA_FLAG_FROZEN		= (1 << 17), /* port is frozen */
162	ATA_FLAG_RECOVERED	= (1 << 18), /* recovery action performed */
163
164	ATA_FLAG_DISABLED	= (1 << 22), /* port is disabled, ignore it */
165	ATA_FLAG_SUSPENDED	= (1 << 23), /* port is suspended (power) */
166
167	/* bits 24:31 of ap->flags are reserved for LLDD specific flags */
168
169	/* struct ata_queued_cmd flags */
170	ATA_QCFLAG_ACTIVE	= (1 << 0), /* cmd not yet ack'd to scsi layer */
171	ATA_QCFLAG_SG		= (1 << 1), /* have s/g table? */
172	ATA_QCFLAG_SINGLE	= (1 << 2), /* no s/g, just a single buffer */
173	ATA_QCFLAG_DMAMAP	= ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
174	ATA_QCFLAG_IO		= (1 << 3), /* standard IO command */
175	ATA_QCFLAG_RESULT_TF	= (1 << 4), /* result TF requested */
176
177	ATA_QCFLAG_FAILED	= (1 << 16), /* cmd failed and is owned by EH */
178	ATA_QCFLAG_SENSE_VALID	= (1 << 17), /* sense data valid */
179	ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
180
181	/* host set flags */
182	ATA_HOST_SIMPLEX	= (1 << 0),	/* Host is simplex, one DMA channel per host_set only */
183
184	/* various lengths of time */
185	ATA_TMOUT_BOOT		= 30 * HZ,	/* heuristic */
186	ATA_TMOUT_BOOT_QUICK	= 7 * HZ,	/* heuristic */
187	ATA_TMOUT_INTERNAL	= 30 * HZ,
188	ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,
189
190	/* ATA bus states */
191	BUS_UNKNOWN		= 0,
192	BUS_DMA			= 1,
193	BUS_IDLE		= 2,
194	BUS_NOINTR		= 3,
195	BUS_NODATA		= 4,
196	BUS_TIMER		= 5,
197	BUS_PIO			= 6,
198	BUS_EDD			= 7,
199	BUS_IDENTIFY		= 8,
200	BUS_PACKET		= 9,
201
202	/* SATA port states */
203	PORT_UNKNOWN		= 0,
204	PORT_ENABLED		= 1,
205	PORT_DISABLED		= 2,
206
207	/* encoding various smaller bitmaps into a single
208	 * unsigned int bitmap (see the packing sketch after this enum)
209	 */
210	ATA_BITS_PIO		= 5,
211	ATA_BITS_MWDMA		= 3,
212	ATA_BITS_UDMA		= 8,
213
214	ATA_SHIFT_PIO		= 0,
215	ATA_SHIFT_MWDMA		= ATA_SHIFT_PIO + ATA_BITS_PIO,
216	ATA_SHIFT_UDMA		= ATA_SHIFT_MWDMA + ATA_BITS_MWDMA,
217
218	ATA_MASK_PIO		= ((1 << ATA_BITS_PIO) - 1) << ATA_SHIFT_PIO,
219	ATA_MASK_MWDMA		= ((1 << ATA_BITS_MWDMA) - 1) << ATA_SHIFT_MWDMA,
220	ATA_MASK_UDMA		= ((1 << ATA_BITS_UDMA) - 1) << ATA_SHIFT_UDMA,
221
222	/* size of buffer to pad xfers ending on unaligned boundaries */
223	ATA_DMA_PAD_SZ		= 4,
224	ATA_DMA_PAD_BUF_SZ	= ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,
225
226	/* masks for port functions */
227	ATA_PORT_PRIMARY	= (1 << 0),
228	ATA_PORT_SECONDARY	= (1 << 1),
229
230	/* ering size */
231	ATA_ERING_SIZE		= 32,
232
233	/* desc_len for ata_eh_info and context */
234	ATA_EH_DESC_LEN		= 80,
235
236	/* reset / recovery action types */
237	ATA_EH_REVALIDATE	= (1 << 0),
238	ATA_EH_SOFTRESET	= (1 << 1),
239	ATA_EH_HARDRESET	= (1 << 2),
240
241	ATA_EH_RESET_MASK	= ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
242
243	/* ata_eh_info->flags */
244	ATA_EHI_DID_RESET	= (1 << 0), /* already reset this port */
245
246	/* max repeat if error condition is still set after ->error_handler */
247	ATA_EH_MAX_REPEAT	= 5,
248
249	/* how hard we will try to probe/recover devices */
250	ATA_PROBE_MAX_TRIES	= 3,
251	ATA_EH_RESET_TRIES	= 3,
252	ATA_EH_DEV_TRIES	= 3,
253};
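
/*
 * Packing sketch (editor's addition, not from the original header): how the
 * per-mode PIO/MWDMA/UDMA bitmaps combine into one xfer mask using the
 * ATA_SHIFT_xxx and ATA_MASK_xxx constants above.  "pio", "mwdma" and
 * "udma" stand for per-mode bitmaps such as ata_device::pio_mask.
 *
 *	unsigned int xfer_mask =
 *		((pio   << ATA_SHIFT_PIO)   & ATA_MASK_PIO)   |
 *		((mwdma << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
 *		((udma  << ATA_SHIFT_UDMA)  & ATA_MASK_UDMA);
 *
 *	unsigned int udma_bits = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
 */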
254
255enum hsm_task_states {
256	HSM_ST_UNKNOWN,		/* state unknown */
257	HSM_ST_IDLE,		/* no command ongoing */
258	HSM_ST,			/* (waiting for the device to) transfer data */
259	HSM_ST_LAST,		/* (waiting for the device to) complete command */
260	HSM_ST_ERR,		/* error */
261	HSM_ST_FIRST,		/* (waiting for the device to)
262				   write CDB or first data block */
263};
264
265enum ata_completion_errors {
266	AC_ERR_DEV		= (1 << 0), /* device reported error */
267	AC_ERR_HSM		= (1 << 1), /* host state machine violation */
268	AC_ERR_TIMEOUT		= (1 << 2), /* timeout */
269	AC_ERR_MEDIA		= (1 << 3), /* media error */
270	AC_ERR_ATA_BUS		= (1 << 4), /* ATA bus error */
271	AC_ERR_HOST_BUS		= (1 << 5), /* host bus error */
272	AC_ERR_SYSTEM		= (1 << 6), /* system error */
273	AC_ERR_INVALID		= (1 << 7), /* invalid argument */
274	AC_ERR_OTHER		= (1 << 8), /* unknown */
275};
276
277/* forward declarations */
278struct scsi_device;
279struct ata_port_operations;
280struct ata_port;
281struct ata_queued_cmd;
282
283/* typedefs */
284typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
285typedef void (*ata_probeinit_fn_t)(struct ata_port *);
286typedef int (*ata_reset_fn_t)(struct ata_port *, unsigned int *);
287typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *);
288
289struct ata_ioports {
290	unsigned long		cmd_addr;
291	unsigned long		data_addr;
292	unsigned long		error_addr;
293	unsigned long		feature_addr;
294	unsigned long		nsect_addr;
295	unsigned long		lbal_addr;
296	unsigned long		lbam_addr;
297	unsigned long		lbah_addr;
298	unsigned long		device_addr;
299	unsigned long		status_addr;
300	unsigned long		command_addr;
301	unsigned long		altstatus_addr;
302	unsigned long		ctl_addr;
303	unsigned long		bmdma_addr;
304	unsigned long		scr_addr;
305};
306
307struct ata_probe_ent {
308	struct list_head	node;
309	struct device 		*dev;
310	const struct ata_port_operations *port_ops;
311	struct scsi_host_template *sht;
312	struct ata_ioports	port[ATA_MAX_PORTS];
313	unsigned int		n_ports;
314	unsigned int		hard_port_no;
315	unsigned int		pio_mask;
316	unsigned int		mwdma_mask;
317	unsigned int		udma_mask;
318	unsigned int		legacy_mode;
319	unsigned long		irq;
320	unsigned int		irq_flags;
321	unsigned long		host_flags;
322	unsigned long		host_set_flags;
323	void __iomem		*mmio_base;
324	void			*private_data;
325};
326
327struct ata_host_set {
328	spinlock_t		lock;
329	struct device 		*dev;
330	unsigned long		irq;
331	void __iomem		*mmio_base;
332	unsigned int		n_ports;
333	void			*private_data;
334	const struct ata_port_operations *ops;
335	unsigned long		flags;
336	int			simplex_claimed;	/* Keep separate in case we
337							   ever need to do this locked */
338	struct ata_port *	ports[0];
339};
340
341struct ata_queued_cmd {
342	struct ata_port		*ap;
343	struct ata_device	*dev;
344
345	struct scsi_cmnd	*scsicmd;
346	void			(*scsidone)(struct scsi_cmnd *);
347
348	struct ata_taskfile	tf;
349	u8			cdb[ATAPI_CDB_LEN];
350
351	unsigned long		flags;		/* ATA_QCFLAG_xxx */
352	unsigned int		tag;
353	unsigned int		n_elem;
354	unsigned int		orig_n_elem;
355
356	int			dma_dir;
357
358	unsigned int		pad_len;
359
360	unsigned int		nsect;
361	unsigned int		cursect;
362
363	unsigned int		nbytes;
364	unsigned int		curbytes;
365
366	unsigned int		cursg;
367	unsigned int		cursg_ofs;
368
369	struct scatterlist	sgent;
370	struct scatterlist	pad_sgent;
371	void			*buf_virt;
372
373	/* DO NOT iterate over __sg manually, use ata_for_each_sg() */
374	struct scatterlist	*__sg;
375
376	unsigned int		err_mask;
377	struct ata_taskfile	result_tf;
378	ata_qc_cb_t		complete_fn;
379
380	void			*private_data;
381};
382
383struct ata_host_stats {
384	unsigned long		unhandled_irq;
385	unsigned long		idle_irq;
386	unsigned long		rw_reqbuf;
387};
388
389struct ata_ering_entry {
390	int			is_io;
391	unsigned int		err_mask;
392	u64			timestamp;
393};
394
395struct ata_ering {
396	int			cursor;
397	struct ata_ering_entry	ring[ATA_ERING_SIZE];
398};
399
400struct ata_device {
401	struct ata_port		*ap;
402	u64			n_sectors;	/* size of device, if ATA */
403	unsigned long		flags;		/* ATA_DFLAG_xxx */
404	unsigned int		class;		/* ATA_DEV_xxx */
405	unsigned int		devno;		/* 0 or 1 */
406	u16			id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
407	u8			pio_mode;
408	u8			dma_mode;
409	u8			xfer_mode;
410	unsigned int		xfer_shift;	/* ATA_SHIFT_xxx */
411
412	unsigned int		multi_count;	/* sectors count for
413						   READ/WRITE MULTIPLE */
414	unsigned int		max_sectors;	/* per-device max sectors */
415	unsigned int		cdb_len;
416
417	/* per-dev xfer mask */
418	unsigned int		pio_mask;
419	unsigned int		mwdma_mask;
420	unsigned int		udma_mask;
421
422	/* for CHS addressing */
423	u16			cylinders;	/* Number of cylinders */
424	u16			heads;		/* Number of heads */
425	u16			sectors;	/* Number of sectors per track */
426
427	/* error history */
428	struct ata_ering	ering;
429};
430
431struct ata_eh_info {
432	struct ata_device	*dev;		/* offending device */
433	u32			serror;		/* SError from LLDD */
434	unsigned int		err_mask;	/* port-wide err_mask */
435	unsigned int		action;		/* ATA_EH_* action mask */
436	unsigned int		flags;		/* ATA_EHI_* flags */
437	char			desc[ATA_EH_DESC_LEN];
438	int			desc_len;
439};
440
441struct ata_eh_context {
442	struct ata_eh_info	i;
443	int			tries[ATA_MAX_DEVICES];
444};
445
446struct ata_port {
447	struct Scsi_Host	*host;	/* our co-allocated scsi host */
448	const struct ata_port_operations *ops;
449	unsigned long		flags;	/* ATA_FLAG_xxx */
450	unsigned int		id;	/* unique id req'd by SCSI midlayer */
451	unsigned int		port_no; /* unique port #; from zero */
452	unsigned int		hard_port_no;	/* hardware port #; from zero */
453
454	struct ata_prd		*prd;	 /* our SG list */
455	dma_addr_t		prd_dma; /* and its DMA mapping */
456
457	void			*pad;	/* array of DMA pad buffers */
458	dma_addr_t		pad_dma;
459
460	struct ata_ioports	ioaddr;	/* ATA cmd/ctl/dma register blocks */
461
462	u8			ctl;	/* cache of ATA control register */
463	u8			last_ctl;	/* Cache last written value */
464	unsigned int		pio_mask;
465	unsigned int		mwdma_mask;
466	unsigned int		udma_mask;
467	unsigned int		cbl;	/* cable type; ATA_CBL_xxx */
468	unsigned int		sata_spd_limit;	/* SATA PHY speed limit */
469
470	/* record runtime error info, protected by host_set lock */
471	struct ata_eh_info	eh_info;
472	/* EH context owned by EH */
473	struct ata_eh_context	eh_context;
474
475	struct ata_device	device[ATA_MAX_DEVICES];
476
477	struct ata_queued_cmd	qcmd[ATA_MAX_QUEUE];
478	unsigned long		qc_allocated;
479	unsigned int		qc_active;
480
481	unsigned int		active_tag;
482	u32			sactive;
483
484	struct ata_host_stats	stats;
485	struct ata_host_set	*host_set;
486	struct device 		*dev;
487
488	struct work_struct	port_task;
489
490	unsigned int		hsm_task_state;
491
492	u32			msg_enable;
493	struct list_head	eh_done_q;
494
495	void			*private_data;
496
497	u8			sector_buf[ATA_SECT_SIZE]; /* owned by EH */
498};
499
500struct ata_port_operations {
501	void (*port_disable) (struct ata_port *);
502
503	void (*dev_config) (struct ata_port *, struct ata_device *);
504
505	void (*set_piomode) (struct ata_port *, struct ata_device *);
506	void (*set_dmamode) (struct ata_port *, struct ata_device *);
507	unsigned long (*mode_filter) (const struct ata_port *, struct ata_device *, unsigned long);
508
509	void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
510	void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
511
512	void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf);
513	u8   (*check_status)(struct ata_port *ap);
514	u8   (*check_altstatus)(struct ata_port *ap);
515	void (*dev_select)(struct ata_port *ap, unsigned int device);
516
517	void (*phy_reset) (struct ata_port *ap); /* obsolete */
518	void (*set_mode) (struct ata_port *ap);
519	int (*probe_reset) (struct ata_port *ap, unsigned int *classes);
520
521	void (*post_set_mode) (struct ata_port *ap);
522
523	int (*check_atapi_dma) (struct ata_queued_cmd *qc);
524
525	void (*bmdma_setup) (struct ata_queued_cmd *qc);
526	void (*bmdma_start) (struct ata_queued_cmd *qc);
527
528	void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
529
530	void (*qc_prep) (struct ata_queued_cmd *qc);
531	unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
532
533	/* Error handlers.  ->error_handler overrides ->eng_timeout and
534	 * indicates that new-style EH is in place.
535	 */
536	void (*eng_timeout) (struct ata_port *ap); /* obsolete */
537
538	void (*freeze) (struct ata_port *ap);
539	void (*thaw) (struct ata_port *ap);
540	void (*error_handler) (struct ata_port *ap);
541	void (*post_internal_cmd) (struct ata_queued_cmd *qc);
542
543	irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
544	void (*irq_clear) (struct ata_port *);
545
546	u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg);
547	void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
548			   u32 val);
549
550	int (*port_start) (struct ata_port *ap);
551	void (*port_stop) (struct ata_port *ap);
552
553	void (*host_stop) (struct ata_host_set *host_set);
554
555	void (*bmdma_stop) (struct ata_queued_cmd *qc);
556	u8   (*bmdma_status) (struct ata_port *ap);
557};
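
/*
 * Illustrative sketch (editor's addition, not from the original header): a
 * conventional taskfile/BMDMA driver usually points most of these hooks at
 * the generic helpers declared later in this file.  "mydrv_ops" is a
 * hypothetical name; real drivers add their own reset and mode-setting
 * methods on top of this skeleton.
 *
 *	static const struct ata_port_operations mydrv_ops = {
 *		.port_disable		= ata_port_disable,
 *		.tf_load		= ata_tf_load,
 *		.tf_read		= ata_tf_read,
 *		.check_status		= ata_check_status,
 *		.exec_command		= ata_exec_command,
 *		.dev_select		= ata_std_dev_select,
 *		.bmdma_setup		= ata_bmdma_setup,
 *		.bmdma_start		= ata_bmdma_start,
 *		.bmdma_stop		= ata_bmdma_stop,
 *		.bmdma_status		= ata_bmdma_status,
 *		.qc_prep		= ata_qc_prep,
 *		.qc_issue		= ata_qc_issue_prot,
 *		.data_xfer		= ata_pio_data_xfer,
 *		.freeze			= ata_bmdma_freeze,
 *		.thaw			= ata_bmdma_thaw,
 *		.error_handler		= ata_bmdma_error_handler,
 *		.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 *		.irq_handler		= ata_interrupt,
 *		.irq_clear		= ata_bmdma_irq_clear,
 *		.port_start		= ata_port_start,
 *		.port_stop		= ata_port_stop,
 *		.host_stop		= ata_host_stop,
 *	};
 */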
558
559struct ata_port_info {
560	struct scsi_host_template	*sht;
561	unsigned long		host_flags;
562	unsigned long		pio_mask;
563	unsigned long		mwdma_mask;
564	unsigned long		udma_mask;
565	const struct ata_port_operations *port_ops;
566	void 			*private_data;
567};
568
569struct ata_timing {
570	unsigned short mode;		/* ATA mode */
571	unsigned short setup;		/* t1 */
572	unsigned short act8b;		/* t2 for 8-bit I/O */
573	unsigned short rec8b;		/* t2i for 8-bit I/O */
574	unsigned short cyc8b;		/* t0 for 8-bit I/O */
575	unsigned short active;		/* t2 or tD */
576	unsigned short recover;		/* t2i or tK */
577	unsigned short cycle;		/* t0 */
578	unsigned short udma;		/* t2CYCTYP/2 */
579};
580
581#define FIT(v,vmin,vmax)	max_t(short,min_t(short,v,vmax),vmin)
582
583extern void ata_port_probe(struct ata_port *);
584extern void __sata_phy_reset(struct ata_port *ap);
585extern void sata_phy_reset(struct ata_port *ap);
586extern void ata_bus_reset(struct ata_port *ap);
587extern int sata_set_spd(struct ata_port *ap);
588extern int ata_drive_probe_reset(struct ata_port *ap,
589			ata_probeinit_fn_t probeinit,
590			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
591			ata_postreset_fn_t postreset, unsigned int *classes);
592extern void ata_std_probeinit(struct ata_port *ap);
593extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
594extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
595extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
596extern int ata_dev_revalidate(struct ata_device *dev, int post_reset);
597extern void ata_port_disable(struct ata_port *);
598extern void ata_std_ports(struct ata_ioports *ioaddr);
599#ifdef CONFIG_PCI
600extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
601			     unsigned int n_ports);
602extern void ata_pci_remove_one (struct pci_dev *pdev);
603extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state);
604extern int ata_pci_device_resume(struct pci_dev *pdev);
605extern int ata_pci_clear_simplex(struct pci_dev *pdev);
606#endif /* CONFIG_PCI */
607extern int ata_device_add(const struct ata_probe_ent *ent);
608extern void ata_host_set_remove(struct ata_host_set *host_set);
609extern int ata_scsi_detect(struct scsi_host_template *sht);
610extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
611extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
612extern int ata_scsi_release(struct Scsi_Host *host);
613extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
614extern int sata_scr_valid(struct ata_port *ap);
615extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
616extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
617extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
618extern int ata_port_online(struct ata_port *ap);
619extern int ata_port_offline(struct ata_port *ap);
620extern int ata_scsi_device_resume(struct scsi_device *);
621extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
622extern int ata_device_resume(struct ata_device *);
623extern int ata_device_suspend(struct ata_device *, pm_message_t state);
624extern int ata_ratelimit(void);
625extern unsigned int ata_busy_sleep(struct ata_port *ap,
626				   unsigned long timeout_pat,
627				   unsigned long timeout);
628extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
629				void *data, unsigned long delay);
630extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
631			     unsigned long interval_msec,
632			     unsigned long timeout_msec);
633
634/*
635 * Default driver ops implementations
636 */
637extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
638extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
639extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp);
640extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
641extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device);
642extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
643extern u8 ata_check_status(struct ata_port *ap);
644extern u8 ata_altstatus(struct ata_port *ap);
645extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
646extern int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes);
647extern int ata_port_start (struct ata_port *ap);
648extern void ata_port_stop (struct ata_port *ap);
649extern void ata_host_stop (struct ata_host_set *host_set);
650extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
651extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
652			       unsigned int buflen, int write_data);
653extern void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
654			      unsigned int buflen, int write_data);
655extern void ata_qc_prep(struct ata_queued_cmd *qc);
656extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
657extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
658extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
659		unsigned int buflen);
660extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
661		 unsigned int n_elem);
662extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
663extern void ata_id_string(const u16 *id, unsigned char *s,
664			  unsigned int ofs, unsigned int len);
665extern void ata_id_c_string(const u16 *id, unsigned char *s,
666			    unsigned int ofs, unsigned int len);
667extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
668extern void ata_bmdma_start (struct ata_queued_cmd *qc);
669extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
670extern u8   ata_bmdma_status(struct ata_port *ap);
671extern void ata_bmdma_irq_clear(struct ata_port *ap);
672extern void ata_bmdma_freeze(struct ata_port *ap);
673extern void ata_bmdma_thaw(struct ata_port *ap);
674extern void ata_bmdma_drive_eh(struct ata_port *ap,
675			       ata_reset_fn_t softreset,
676			       ata_reset_fn_t hardreset,
677			       ata_postreset_fn_t postreset);
678extern void ata_bmdma_error_handler(struct ata_port *ap);
679extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
680extern void ata_qc_complete(struct ata_queued_cmd *qc);
681extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
682				    void (*finish_qc)(struct ata_queued_cmd *));
683extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
684			      void (*done)(struct scsi_cmnd *));
685extern int ata_std_bios_param(struct scsi_device *sdev,
686			      struct block_device *bdev,
687			      sector_t capacity, int geom[]);
688extern int ata_scsi_slave_config(struct scsi_device *sdev);
689extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
690				       int queue_depth);
691extern struct ata_device *ata_dev_pair(struct ata_device *adev);
692
693/*
694 * Timing helpers
695 */
696
697extern unsigned int ata_pio_need_iordy(const struct ata_device *);
698extern int ata_timing_compute(struct ata_device *, unsigned short,
699			      struct ata_timing *, int, int);
700extern void ata_timing_merge(const struct ata_timing *,
701			     const struct ata_timing *, struct ata_timing *,
702			     unsigned int);
703
704enum {
705	ATA_TIMING_SETUP	= (1 << 0),
706	ATA_TIMING_ACT8B	= (1 << 1),
707	ATA_TIMING_REC8B	= (1 << 2),
708	ATA_TIMING_CYC8B	= (1 << 3),
709	ATA_TIMING_8BIT		= ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
710				  ATA_TIMING_CYC8B,
711	ATA_TIMING_ACTIVE	= (1 << 4),
712	ATA_TIMING_RECOVER	= (1 << 5),
713	ATA_TIMING_CYCLE	= (1 << 6),
714	ATA_TIMING_UDMA		= (1 << 7),
715	ATA_TIMING_ALL		= ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
716				  ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
717				  ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
718				  ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
719};
720
721
722#ifdef CONFIG_PCI
723struct pci_bits {
724	unsigned int		reg;	/* PCI config register to read */
725	unsigned int		width;	/* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
726	unsigned long		mask;
727	unsigned long		val;
728};
729
730extern void ata_pci_host_stop (struct ata_host_set *host_set);
731extern struct ata_probe_ent *
732ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
733extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
734extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
735#endif /* CONFIG_PCI */
736
737/*
738 * EH
739 */
740extern void ata_eng_timeout(struct ata_port *ap);
741
742extern void ata_port_schedule_eh(struct ata_port *ap);
743extern int ata_port_abort(struct ata_port *ap);
744extern int ata_port_freeze(struct ata_port *ap);
745
746extern void ata_eh_freeze_port(struct ata_port *ap);
747extern void ata_eh_thaw_port(struct ata_port *ap);
748
749extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
750extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
751
752extern void ata_do_eh(struct ata_port *ap, ata_reset_fn_t softreset,
753		      ata_reset_fn_t hardreset, ata_postreset_fn_t postreset);
754
755/*
756 * printk helpers
757 */
758#define ata_port_printk(ap, lv, fmt, args...) \
759	printk(lv"ata%u: "fmt, (ap)->id , ##args)
760
761#define ata_dev_printk(dev, lv, fmt, args...) \
762	printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)
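
/*
 * Usage sketch (editor's addition, not from the original header): the log
 * level is passed as the first argument, ahead of the format string.
 *
 *	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed\n");
 *	ata_dev_printk(dev, KERN_INFO, "%llu sectors, multi %u\n",
 *		       (unsigned long long)dev->n_sectors, dev->multi_count);
 */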
763
764/*
765 * ata_eh_info helpers
766 */
767#define ata_ehi_push_desc(ehi, fmt, args...) do { \
768	(ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
769				     ATA_EH_DESC_LEN - (ehi)->desc_len, \
770				     fmt , ##args); \
771} while (0)
772
773#define ata_ehi_clear_desc(ehi) do { \
774	(ehi)->desc[0] = '\0'; \
775	(ehi)->desc_len = 0; \
776} while (0)
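
/*
 * Usage sketch (editor's addition, not from the original header): building
 * up the human-readable EH description attached to ap->eh_info.  "serror"
 * stands for a value previously read from the SError register.
 *
 *	struct ata_eh_info *ehi = &ap->eh_info;
 *
 *	ata_ehi_clear_desc(ehi);
 *	ata_ehi_push_desc(ehi, "SError 0x%08x", serror);
 *	ata_ehi_push_desc(ehi, ", hotplug event");
 */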
777
778/*
779 * qc helpers
780 */
781static inline int
782ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
783{
784	if (sg == &qc->pad_sgent)
785		return 1;
786	if (qc->pad_len)
787		return 0;
788	if (((sg - qc->__sg) + 1) == qc->n_elem)
789		return 1;
790	return 0;
791}
792
793static inline struct scatterlist *
794ata_qc_first_sg(struct ata_queued_cmd *qc)
795{
796	if (qc->n_elem)
797		return qc->__sg;
798	if (qc->pad_len)
799		return &qc->pad_sgent;
800	return NULL;
801}
802
803static inline struct scatterlist *
804ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
805{
806	if (sg == &qc->pad_sgent)
807		return NULL;
808	if (++sg - qc->__sg < qc->n_elem)
809		return sg;
810	if (qc->pad_len)
811		return &qc->pad_sgent;
812	return NULL;
813}
814
815#define ata_for_each_sg(sg, qc) \
816	for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))
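
/*
 * Usage sketch (editor's addition, not from the original header): always
 * walk a command's S/G list through ata_for_each_sg(), which also visits
 * the DMA pad entry, instead of touching qc->__sg directly.
 *
 *	struct scatterlist *sg;
 *	unsigned int nbytes = 0;
 *
 *	ata_for_each_sg(sg, qc)
 *		nbytes += sg->length;
 */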
817
818static inline unsigned int ata_tag_valid(unsigned int tag)
819{
820	return (tag < ATA_MAX_QUEUE) ? 1 : 0;
821}
822
823static inline unsigned int ata_tag_internal(unsigned int tag)
824{
825	return tag == ATA_MAX_QUEUE - 1;
826}
827
828static inline unsigned int ata_class_enabled(unsigned int class)
829{
830	return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
831}
832
833static inline unsigned int ata_class_disabled(unsigned int class)
834{
835	return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
836}
837
838static inline unsigned int ata_class_absent(unsigned int class)
839{
840	return !ata_class_enabled(class) && !ata_class_disabled(class);
841}
842
843static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
844{
845	return ata_class_enabled(dev->class);
846}
847
848static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
849{
850	return ata_class_disabled(dev->class);
851}
852
853static inline unsigned int ata_dev_absent(const struct ata_device *dev)
854{
855	return ata_class_absent(dev->class);
856}
857
858static inline u8 ata_chk_status(struct ata_port *ap)
859{
860	return ap->ops->check_status(ap);
861}
862
863
864/**
865 *	ata_pause - Flush writes and pause 400 nanoseconds.
866 *	@ap: Port to wait for.
867 *
868 *	LOCKING:
869 *	Inherited from caller.
870 */
871
872static inline void ata_pause(struct ata_port *ap)
873{
874	ata_altstatus(ap);
875	ndelay(400);
876}
877
878
879/**
880 *	ata_busy_wait - Wait for a port status register
881 *	@ap: Port to wait for.
882 *
883 *	Waits up to max*10 microseconds for the selected bits in the port's
884 *	status register to be cleared.
885 *	Returns final value of status register.
886 *
887 *	LOCKING:
888 *	Inherited from caller.
889 */
890
891static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits,
892			       unsigned int max)
893{
894	u8 status;
895
896	do {
897		udelay(10);
898		status = ata_chk_status(ap);
899		max--;
900	} while ((status & bits) && (max > 0));
901
902	return status;
903}
904
905
906/**
907 *	ata_wait_idle - Wait for a port to be idle.
908 *	@ap: Port to wait for.
909 *
910 *	Waits up to 10ms for port's BUSY and DRQ signals to clear.
911 *	Returns final value of status register.
912 *
913 *	LOCKING:
914 *	Inherited from caller.
915 */
916
917static inline u8 ata_wait_idle(struct ata_port *ap)
918{
919	u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
920
921	if (status & (ATA_BUSY | ATA_DRQ)) {
922		unsigned long l = ap->ioaddr.status_addr;
923		if (ata_msg_warn(ap))
924			printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
925				status, l);
926	}
927
928	return status;
929}
930
931static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
932{
933	qc->tf.ctl |= ATA_NIEN;
934}
935
936static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
937						       unsigned int tag)
938{
939	if (likely(ata_tag_valid(tag)))
940		return &ap->qcmd[tag];
941	return NULL;
942}
943
944static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
945						     unsigned int tag)
946{
947	struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
948
949	if (unlikely(!qc) || !ap->ops->error_handler)
950		return qc;
951
952	if ((qc->flags & (ATA_QCFLAG_ACTIVE |
953			  ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
954		return qc;
955
956	return NULL;
957}
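
/*
 * Usage sketch (editor's addition, not from the original header): a typical
 * interrupt handler looks up the active command and passes it to
 * ata_host_intr(); "handled" is a hypothetical local accumulator.
 *
 *	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
 *
 *	if (qc)
 *		handled |= ata_host_intr(ap, qc);
 */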
958
959static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
960{
961	memset(tf, 0, sizeof(*tf));
962
963	tf->ctl = dev->ap->ctl;
964	if (dev->devno == 0)
965		tf->device = ATA_DEVICE_OBS;
966	else
967		tf->device = ATA_DEVICE_OBS | ATA_DEV1;
968}
969
970static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
971{
972	qc->__sg = NULL;
973	qc->flags = 0;
974	qc->cursect = qc->cursg = qc->cursg_ofs = 0;
975	qc->nsect = 0;
976	qc->nbytes = qc->curbytes = 0;
977	qc->err_mask = 0;
978
979	ata_tf_init(qc->dev, &qc->tf);
980
981	/* init result_tf such that it indicates normal completion */
982	qc->result_tf.command = ATA_DRDY;
983	qc->result_tf.feature = 0;
984}
985
986/**
987 *	ata_irq_on - Enable interrupts on a port.
988 *	@ap: Port on which interrupts are enabled.
989 *
990 *	Enable interrupts on a legacy IDE device using MMIO or PIO,
991 *	wait for idle, clear any pending interrupts.
992 *
993 *	LOCKING:
994 *	Inherited from caller.
995 */
996
997static inline u8 ata_irq_on(struct ata_port *ap)
998{
999	struct ata_ioports *ioaddr = &ap->ioaddr;
1000	u8 tmp;
1001
1002	ap->ctl &= ~ATA_NIEN;
1003	ap->last_ctl = ap->ctl;
1004
1005	if (ap->flags & ATA_FLAG_MMIO)
1006		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1007	else
1008		outb(ap->ctl, ioaddr->ctl_addr);
1009	tmp = ata_wait_idle(ap);
1010
1011	ap->ops->irq_clear(ap);
1012
1013	return tmp;
1014}
1015
1016
1017/**
1018 *	ata_irq_ack - Acknowledge a device interrupt.
1019 *	@ap: Port on which interrupts are enabled.
1020 *
1021 *	Wait up to 10 ms for legacy IDE device to become idle (BUSY
1022 *	or BUSY+DRQ clear).  Obtain dma status and port status from
1023 *	device.  Clear the interrupt.  Return port status.
1024 *
1025 *	LOCKING:
1026 */
1027
1028static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
1029{
1030	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
1031	u8 host_stat, post_stat, status;
1032
1033	status = ata_busy_wait(ap, bits, 1000);
1034	if (status & bits)
1035		if (ata_msg_err(ap))
1036			printk(KERN_ERR "abnormal status 0x%X\n", status);
1037
1038	/* get controller status; clear intr, err bits */
1039	if (ap->flags & ATA_FLAG_MMIO) {
1040		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
1041		host_stat = readb(mmio + ATA_DMA_STATUS);
1042		writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
1043		       mmio + ATA_DMA_STATUS);
1044
1045		post_stat = readb(mmio + ATA_DMA_STATUS);
1046	} else {
1047		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
1048		outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
1049		     ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
1050
1051		post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
1052	}
1053
1054	if (ata_msg_intr(ap))
1055		printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
1056			__FUNCTION__,
1057			host_stat, post_stat, status);
1058
1059	return status;
1060}
1061
1062static inline int ata_try_flush_cache(const struct ata_device *dev)
1063{
1064	return ata_id_wcache_enabled(dev->id) ||
1065	       ata_id_has_flush(dev->id) ||
1066	       ata_id_has_flush_ext(dev->id);
1067}
1068
1069static inline unsigned int ac_err_mask(u8 status)
1070{
1071	if (status & (ATA_BUSY | ATA_DRQ))
1072		return AC_ERR_HSM;
1073	if (status & (ATA_ERR | ATA_DF))
1074		return AC_ERR_DEV;
1075	return 0;
1076}
1077
1078static inline unsigned int __ac_err_mask(u8 status)
1079{
1080	unsigned int mask = ac_err_mask(status);
1081	if (mask == 0)
1082		return AC_ERR_OTHER;
1083	return mask;
1084}
1085
1086static inline int ata_pad_alloc(struct ata_port *ap, struct device *dev)
1087{
1088	ap->pad_dma = 0;
1089	ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ,
1090				     &ap->pad_dma, GFP_KERNEL);
1091	return (ap->pad == NULL) ? -ENOMEM : 0;
1092}
1093
1094static inline void ata_pad_free(struct ata_port *ap, struct device *dev)
1095{
1096	dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
1097}
1098
1099static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
1100{
1101	return (struct ata_port *) &host->hostdata[0];
1102}
1103
1104#endif /* __LINUX_LIBATA_H__ */
1105