1/*
2 *  libata-core.c - helper library for ATA
3 *
4 *  Maintained by:  Tejun Heo <tj@kernel.org>
5 *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6 *		    on emails.
7 *
8 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9 *  Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 *  This program is free software; you can redistribute it and/or modify
13 *  it under the terms of the GNU General Public License as published by
14 *  the Free Software Foundation; either version 2, or (at your option)
15 *  any later version.
16 *
17 *  This program is distributed in the hope that it will be useful,
18 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20 *  GNU General Public License for more details.
21 *
22 *  You should have received a copy of the GNU General Public License
23 *  along with this program; see the file COPYING.  If not, write to
24 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 *  libata documentation is available via 'make {ps|pdf}docs',
28 *  as Documentation/DocBook/libata.*
29 *
30 *  Hardware documentation available from http://www.t13.org/ and
31 *  http://www.sata-io.org/
32 *
33 *  Standards documents from:
34 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 *	http://www.sata-io.org (SATA)
37 *	http://www.compactflash.org (CF)
38 *	http://www.qic.org (QIC157 - Tape and DSC)
39 *	http://www.ce-ata.org (CE-ATA: not supported)
40 *
41 */
42
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/spinlock.h>
50#include <linux/blkdev.h>
51#include <linux/delay.h>
52#include <linux/timer.h>
53#include <linux/interrupt.h>
54#include <linux/completion.h>
55#include <linux/suspend.h>
56#include <linux/workqueue.h>
57#include <linux/scatterlist.h>
58#include <linux/io.h>
59#include <linux/async.h>
60#include <linux/log2.h>
61#include <linux/slab.h>
62#include <linux/glob.h>
63#include <scsi/scsi.h>
64#include <scsi/scsi_cmnd.h>
65#include <scsi/scsi_host.h>
66#include <linux/libata.h>
67#include <asm/byteorder.h>
68#include <linux/cdrom.h>
69#include <linux/ratelimit.h>
70#include <linux/pm_runtime.h>
71#include <linux/platform_device.h>
72
73#include "libata.h"
74#include "libata-transport.h"
75
76/* debounce timing parameters in msecs { interval, duration, timeout } */
77const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
78const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
79const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
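
/*
 * For illustration (see the sata_link_debounce()/sata_link_resume() helpers
 * elsewhere in libata, which consume these tables): SStatus is sampled every
 * @interval msecs and must hold a stable value for @duration msecs, with
 * roughly @timeout msecs allowed for the whole operation.
 */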
80
81const struct ata_port_operations ata_base_port_ops = {
82	.prereset		= ata_std_prereset,
83	.postreset		= ata_std_postreset,
84	.error_handler		= ata_std_error_handler,
85	.sched_eh		= ata_std_sched_eh,
86	.end_eh			= ata_std_end_eh,
87};
88
89const struct ata_port_operations sata_port_ops = {
90	.inherits		= &ata_base_port_ops,
91
92	.qc_defer		= ata_std_qc_defer,
93	.hardreset		= sata_std_hardreset,
94};
95
96static unsigned int ata_dev_init_params(struct ata_device *dev,
97					u16 heads, u16 sectors);
98static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
99static void ata_dev_xfermask(struct ata_device *dev);
100static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
101
102atomic_t ata_print_id = ATOMIC_INIT(0);
103
104struct ata_force_param {
105	const char	*name;
106	unsigned int	cbl;
107	int		spd_limit;
108	unsigned long	xfer_mask;
109	unsigned int	horkage_on;
110	unsigned int	horkage_off;
111	unsigned int	lflags;
112};
113
114struct ata_force_ent {
115	int			port;
116	int			device;
117	struct ata_force_param	param;
118};
119
120static struct ata_force_ent *ata_force_tbl;
121static int ata_force_tbl_size;
122
123static char ata_force_param_buf[PAGE_SIZE] __initdata;
124/* param_buf is thrown away after initialization, disallow read */
125module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
126MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
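
/*
 * Example, borrowed from the ata_force_cbl() description below:
 * "libata.force=1.00:40c,udma4" forces a 40-wire cable and UDMA/4 on
 * device 0 of port 1.  Cable type and transfer mode honor the last
 * matching entry; horkage flags accumulate across matching entries.
 */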
127
128static int atapi_enabled = 1;
129module_param(atapi_enabled, int, 0444);
130MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
131
132static int atapi_dmadir = 0;
133module_param(atapi_dmadir, int, 0444);
134MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
135
136int atapi_passthru16 = 1;
137module_param(atapi_passthru16, int, 0444);
138MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
139
140int libata_fua = 0;
141module_param_named(fua, libata_fua, int, 0444);
142MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
143
144static int ata_ignore_hpa;
145module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
146MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
147
148static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
149module_param_named(dma, libata_dma_mask, int, 0444);
150MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
151
152static int ata_probe_timeout;
153module_param(ata_probe_timeout, int, 0444);
154MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
155
156int libata_noacpi = 0;
157module_param_named(noacpi, libata_noacpi, int, 0444);
158MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
159
160int libata_allow_tpm = 0;
161module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
162MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
163
164static int atapi_an;
165module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
167
168MODULE_AUTHOR("Jeff Garzik");
169MODULE_DESCRIPTION("Library module for ATA devices");
170MODULE_LICENSE("GPL");
171MODULE_VERSION(DRV_VERSION);
172
173
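/*
 * SStatus DET (bits 3:0) == 0x3 means "device presence detected and Phy
 * communication established", i.e. the link is up.
 */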
174static bool ata_sstatus_online(u32 sstatus)
175{
176	return (sstatus & 0xf) == 0x3;
177}
178
179/**
180 *	ata_link_next - link iteration helper
181 *	@link: the previous link, NULL to start
182 *	@ap: ATA port containing links to iterate
183 *	@mode: iteration mode, one of ATA_LITER_*
184 *
185 *	LOCKING:
186 *	Host lock or EH context.
187 *
188 *	RETURNS:
189 *	Pointer to the next link.
190 */
191struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
192			       enum ata_link_iter_mode mode)
193{
194	BUG_ON(mode != ATA_LITER_EDGE &&
195	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
196
197	/* NULL link indicates start of iteration */
198	if (!link)
199		switch (mode) {
200		case ATA_LITER_EDGE:
201		case ATA_LITER_PMP_FIRST:
202			if (sata_pmp_attached(ap))
203				return ap->pmp_link;
204			/* fall through */
205		case ATA_LITER_HOST_FIRST:
206			return &ap->link;
207		}
208
209	/* we just iterated over the host link, what's next? */
210	if (link == &ap->link)
211		switch (mode) {
212		case ATA_LITER_HOST_FIRST:
213			if (sata_pmp_attached(ap))
214				return ap->pmp_link;
215			/* fall through */
216		case ATA_LITER_PMP_FIRST:
217			if (unlikely(ap->slave_link))
218				return ap->slave_link;
219			/* fall through */
220		case ATA_LITER_EDGE:
221			return NULL;
222		}
223
224	/* slave_link excludes PMP */
225	if (unlikely(link == ap->slave_link))
226		return NULL;
227
228	/* we were over a PMP link */
229	if (++link < ap->pmp_link + ap->nr_pmp_links)
230		return link;
231
232	if (mode == ATA_LITER_PMP_FIRST)
233		return &ap->link;
234
235	return NULL;
236}
237
238/**
239 *	ata_dev_next - device iteration helper
240 *	@dev: the previous device, NULL to start
241 *	@link: ATA link containing devices to iterate
242 *	@mode: iteration mode, one of ATA_DITER_*
243 *
244 *	LOCKING:
245 *	Host lock or EH context.
246 *
247 *	RETURNS:
248 *	Pointer to the next device.
249 */
250struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
251				enum ata_dev_iter_mode mode)
252{
253	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
254	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
255
256	/* NULL dev indicates start of iteration */
257	if (!dev)
258		switch (mode) {
259		case ATA_DITER_ENABLED:
260		case ATA_DITER_ALL:
261			dev = link->device;
262			goto check;
263		case ATA_DITER_ENABLED_REVERSE:
264		case ATA_DITER_ALL_REVERSE:
265			dev = link->device + ata_link_max_devices(link) - 1;
266			goto check;
267		}
268
269 next:
270	/* move to the next one */
271	switch (mode) {
272	case ATA_DITER_ENABLED:
273	case ATA_DITER_ALL:
274		if (++dev < link->device + ata_link_max_devices(link))
275			goto check;
276		return NULL;
277	case ATA_DITER_ENABLED_REVERSE:
278	case ATA_DITER_ALL_REVERSE:
279		if (--dev >= link->device)
280			goto check;
281		return NULL;
282	}
283
284 check:
285	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
286	    !ata_dev_enabled(dev))
287		goto next;
288	return dev;
289}
290
291/**
292 *	ata_dev_phys_link - find physical link for a device
293 *	@dev: ATA device to look up physical link for
294 *
295 *	Look up physical link which @dev is attached to.  Note that
296 *	this is different from @dev->link only when @dev is on slave
297 *	link.  For all other cases, it's the same as @dev->link.
298 *
299 *	LOCKING:
300 *	Don't care.
301 *
302 *	RETURNS:
303 *	Pointer to the found physical link.
304 */
305struct ata_link *ata_dev_phys_link(struct ata_device *dev)
306{
307	struct ata_port *ap = dev->link->ap;
308
309	if (!ap->slave_link)
310		return dev->link;
311	if (!dev->devno)
312		return &ap->link;
313	return ap->slave_link;
314}
315
316/**
317 *	ata_force_cbl - force cable type according to libata.force
318 *	@ap: ATA port of interest
319 *
320 *	Force cable type according to libata.force and whine about it.
321 *	The last entry which has matching port number is used, so it
322 *	can be specified as part of device force parameters.  For
323 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
324 *	same effect.
325 *
326 *	LOCKING:
327 *	EH context.
328 */
329void ata_force_cbl(struct ata_port *ap)
330{
331	int i;
332
333	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
334		const struct ata_force_ent *fe = &ata_force_tbl[i];
335
336		if (fe->port != -1 && fe->port != ap->print_id)
337			continue;
338
339		if (fe->param.cbl == ATA_CBL_NONE)
340			continue;
341
342		ap->cbl = fe->param.cbl;
343		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
344		return;
345	}
346}
347
348/**
349 *	ata_force_link_limits - force link limits according to libata.force
350 *	@link: ATA link of interest
351 *
352 *	Force link flags and SATA spd limit according to libata.force
353 *	and whine about it.  When only the port part is specified
354 *	(e.g. 1:), the limit applies to all links connected to both
355 *	the host link and all fan-out ports connected via PMP.  If the
356 *	device part is specified as 0 (e.g. 1.00:), it specifies the
 *	first fan-out link, not the host link.  Device number 15 always
 *	points to the host link whether PMP is attached or not.  If the
 *	controller has a slave link, device number 16 points to it.
360 *
361 *	LOCKING:
362 *	EH context.
363 */
364static void ata_force_link_limits(struct ata_link *link)
365{
366	bool did_spd = false;
367	int linkno = link->pmp;
368	int i;
369
370	if (ata_is_host_link(link))
371		linkno += 15;
372
373	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
374		const struct ata_force_ent *fe = &ata_force_tbl[i];
375
376		if (fe->port != -1 && fe->port != link->ap->print_id)
377			continue;
378
379		if (fe->device != -1 && fe->device != linkno)
380			continue;
381
382		/* only honor the first spd limit */
383		if (!did_spd && fe->param.spd_limit) {
384			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
385			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
386					fe->param.name);
387			did_spd = true;
388		}
389
390		/* let lflags stack */
391		if (fe->param.lflags) {
392			link->flags |= fe->param.lflags;
393			ata_link_notice(link,
394					"FORCE: link flag 0x%x forced -> 0x%x\n",
395					fe->param.lflags, link->flags);
396		}
397	}
398}
399
400/**
401 *	ata_force_xfermask - force xfermask according to libata.force
402 *	@dev: ATA device of interest
403 *
404 *	Force xfer_mask according to libata.force and whine about it.
405 *	For consistency with link selection, device number 15 selects
406 *	the first device connected to the host link.
407 *
408 *	LOCKING:
409 *	EH context.
410 */
411static void ata_force_xfermask(struct ata_device *dev)
412{
413	int devno = dev->link->pmp + dev->devno;
414	int alt_devno = devno;
415	int i;
416
417	/* allow n.15/16 for devices attached to host port */
418	if (ata_is_host_link(dev->link))
419		alt_devno += 15;
420
421	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
422		const struct ata_force_ent *fe = &ata_force_tbl[i];
423		unsigned long pio_mask, mwdma_mask, udma_mask;
424
425		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
426			continue;
427
428		if (fe->device != -1 && fe->device != devno &&
429		    fe->device != alt_devno)
430			continue;
431
432		if (!fe->param.xfer_mask)
433			continue;
434
435		ata_unpack_xfermask(fe->param.xfer_mask,
436				    &pio_mask, &mwdma_mask, &udma_mask);
437		if (udma_mask)
438			dev->udma_mask = udma_mask;
439		else if (mwdma_mask) {
440			dev->udma_mask = 0;
441			dev->mwdma_mask = mwdma_mask;
442		} else {
443			dev->udma_mask = 0;
444			dev->mwdma_mask = 0;
445			dev->pio_mask = pio_mask;
446		}
447
448		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
449			       fe->param.name);
450		return;
451	}
452}
453
454/**
455 *	ata_force_horkage - force horkage according to libata.force
456 *	@dev: ATA device of interest
457 *
458 *	Force horkage according to libata.force and whine about it.
459 *	For consistency with link selection, device number 15 selects
460 *	the first device connected to the host link.
461 *
462 *	LOCKING:
463 *	EH context.
464 */
465static void ata_force_horkage(struct ata_device *dev)
466{
467	int devno = dev->link->pmp + dev->devno;
468	int alt_devno = devno;
469	int i;
470
471	/* allow n.15/16 for devices attached to host port */
472	if (ata_is_host_link(dev->link))
473		alt_devno += 15;
474
475	for (i = 0; i < ata_force_tbl_size; i++) {
476		const struct ata_force_ent *fe = &ata_force_tbl[i];
477
478		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
479			continue;
480
481		if (fe->device != -1 && fe->device != devno &&
482		    fe->device != alt_devno)
483			continue;
484
485		if (!(~dev->horkage & fe->param.horkage_on) &&
486		    !(dev->horkage & fe->param.horkage_off))
487			continue;
488
489		dev->horkage |= fe->param.horkage_on;
490		dev->horkage &= ~fe->param.horkage_off;
491
492		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
493			       fe->param.name);
494	}
495}
496
497/**
498 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
499 *	@opcode: SCSI opcode
500 *
501 *	Determine ATAPI command type from @opcode.
502 *
503 *	LOCKING:
504 *	None.
505 *
506 *	RETURNS:
507 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
508 */
509int atapi_cmd_type(u8 opcode)
510{
511	switch (opcode) {
512	case GPCMD_READ_10:
513	case GPCMD_READ_12:
514		return ATAPI_READ;
515
516	case GPCMD_WRITE_10:
517	case GPCMD_WRITE_12:
518	case GPCMD_WRITE_AND_VERIFY_10:
519		return ATAPI_WRITE;
520
521	case GPCMD_READ_CD:
522	case GPCMD_READ_CD_MSF:
523		return ATAPI_READ_CD;
524
525	case ATA_16:
526	case ATA_12:
527		if (atapi_passthru16)
528			return ATAPI_PASS_THRU;
529		/* fall thru */
530	default:
531		return ATAPI_MISC;
532	}
533}
534
535/**
536 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
537 *	@tf: Taskfile to convert
538 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for a command
 *	@fis: Buffer into which data will be output
541 *
542 *	Converts a standard ATA taskfile to a Serial ATA
543 *	FIS structure (Register - Host to Device).
544 *
545 *	LOCKING:
546 *	Inherited from caller.
547 */
548void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
549{
550	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
552	if (is_cmd)
553		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
554
555	fis[2] = tf->command;
556	fis[3] = tf->feature;
557
558	fis[4] = tf->lbal;
559	fis[5] = tf->lbam;
560	fis[6] = tf->lbah;
561	fis[7] = tf->device;
562
563	fis[8] = tf->hob_lbal;
564	fis[9] = tf->hob_lbam;
565	fis[10] = tf->hob_lbah;
566	fis[11] = tf->hob_feature;
567
568	fis[12] = tf->nsect;
569	fis[13] = tf->hob_nsect;
570	fis[14] = 0;
571	fis[15] = tf->ctl;
572
573	fis[16] = tf->auxiliary & 0xff;
574	fis[17] = (tf->auxiliary >> 8) & 0xff;
575	fis[18] = (tf->auxiliary >> 16) & 0xff;
576	fis[19] = (tf->auxiliary >> 24) & 0xff;
577}
578
579/**
580 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
581 *	@fis: Buffer from which data will be input
582 *	@tf: Taskfile to output
583 *
584 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
585 *
586 *	LOCKING:
587 *	Inherited from caller.
588 */
589
590void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
591{
592	tf->command	= fis[2];	/* status */
593	tf->feature	= fis[3];	/* error */
594
595	tf->lbal	= fis[4];
596	tf->lbam	= fis[5];
597	tf->lbah	= fis[6];
598	tf->device	= fis[7];
599
600	tf->hob_lbal	= fis[8];
601	tf->hob_lbam	= fis[9];
602	tf->hob_lbah	= fis[10];
603
604	tf->nsect	= fis[12];
605	tf->hob_nsect	= fis[13];
606}
607
608static const u8 ata_rw_cmds[] = {
609	/* pio multi */
610	ATA_CMD_READ_MULTI,
611	ATA_CMD_WRITE_MULTI,
612	ATA_CMD_READ_MULTI_EXT,
613	ATA_CMD_WRITE_MULTI_EXT,
614	0,
615	0,
616	0,
617	ATA_CMD_WRITE_MULTI_FUA_EXT,
618	/* pio */
619	ATA_CMD_PIO_READ,
620	ATA_CMD_PIO_WRITE,
621	ATA_CMD_PIO_READ_EXT,
622	ATA_CMD_PIO_WRITE_EXT,
623	0,
624	0,
625	0,
626	0,
627	/* dma */
628	ATA_CMD_READ,
629	ATA_CMD_WRITE,
630	ATA_CMD_READ_EXT,
631	ATA_CMD_WRITE_EXT,
632	0,
633	0,
634	0,
635	ATA_CMD_WRITE_FUA_EXT
636};
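
/*
 * ata_rw_cmds[] is indexed by base + fua*4 + lba48*2 + write, where base is
 * 0 for PIO multi, 8 for plain PIO and 16 for DMA (see ata_rwcmd_protocol()
 * below).  Zero entries mark combinations with no corresponding command,
 * e.g. FUA writes without LBA48.
 */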
637
638/**
639 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
640 *	@tf: command to examine and configure
641 *	@dev: device tf belongs to
642 *
643 *	Examine the device configuration and tf->flags to calculate
644 *	the proper read/write commands and protocol to use.
645 *
646 *	LOCKING:
647 *	caller.
648 */
649static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
650{
651	u8 cmd;
652
653	int index, fua, lba48, write;
654
655	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
656	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
657	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
658
659	if (dev->flags & ATA_DFLAG_PIO) {
660		tf->protocol = ATA_PROT_PIO;
661		index = dev->multi_count ? 0 : 8;
662	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
663		/* Unable to use DMA due to host limitation */
664		tf->protocol = ATA_PROT_PIO;
665		index = dev->multi_count ? 0 : 8;
666	} else {
667		tf->protocol = ATA_PROT_DMA;
668		index = 16;
669	}
670
671	cmd = ata_rw_cmds[index + fua + lba48 + write];
672	if (cmd) {
673		tf->command = cmd;
674		return 0;
675	}
676	return -1;
677}
678
679/**
680 *	ata_tf_read_block - Read block address from ATA taskfile
681 *	@tf: ATA taskfile of interest
682 *	@dev: ATA device @tf belongs to
683 *
684 *	LOCKING:
685 *	None.
686 *
687 *	Read block address from @tf.  This function can handle all
688 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
689 *	flags select the address format to use.
690 *
691 *	RETURNS:
692 *	Block address read from @tf.
693 */
694u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
695{
696	u64 block = 0;
697
698	if (tf->flags & ATA_TFLAG_LBA) {
699		if (tf->flags & ATA_TFLAG_LBA48) {
700			block |= (u64)tf->hob_lbah << 40;
701			block |= (u64)tf->hob_lbam << 32;
702			block |= (u64)tf->hob_lbal << 24;
703		} else
704			block |= (tf->device & 0xf) << 24;
705
706		block |= tf->lbah << 16;
707		block |= tf->lbam << 8;
708		block |= tf->lbal;
709	} else {
710		u32 cyl, head, sect;
711
712		cyl = tf->lbam | (tf->lbah << 8);
713		head = tf->device & 0xf;
714		sect = tf->lbal;
715
716		if (!sect) {
717			ata_dev_warn(dev,
718				     "device reported invalid CHS sector 0\n");
719			sect = 1; /* oh well */
720		}
721
722		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
723	}
724
725	return block;
726}
727
728/**
729 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
730 *	@tf: Target ATA taskfile
731 *	@dev: ATA device @tf belongs to
732 *	@block: Block address
733 *	@n_block: Number of blocks
734 *	@tf_flags: RW/FUA etc...
735 *	@tag: tag
736 *
737 *	LOCKING:
738 *	None.
739 *
740 *	Build ATA taskfile @tf for read/write request described by
741 *	@block, @n_block, @tf_flags and @tag on @dev.
742 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
746 *	-EINVAL if the request is invalid.
747 */
748int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
749		    u64 block, u32 n_block, unsigned int tf_flags,
750		    unsigned int tag)
751{
752	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
753	tf->flags |= tf_flags;
754
755	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
756		/* yay, NCQ */
757		if (!lba_48_ok(block, n_block))
758			return -ERANGE;
759
760		tf->protocol = ATA_PROT_NCQ;
761		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
762
763		if (tf->flags & ATA_TFLAG_WRITE)
764			tf->command = ATA_CMD_FPDMA_WRITE;
765		else
766			tf->command = ATA_CMD_FPDMA_READ;
767
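		/*
		 * For FPDMA (NCQ) commands the tag lives in bits 7:3 of the
		 * count field and the sector count moves into the feature
		 * fields, which is what the assignments below encode.
		 */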
768		tf->nsect = tag << 3;
769		tf->hob_feature = (n_block >> 8) & 0xff;
770		tf->feature = n_block & 0xff;
771
772		tf->hob_lbah = (block >> 40) & 0xff;
773		tf->hob_lbam = (block >> 32) & 0xff;
774		tf->hob_lbal = (block >> 24) & 0xff;
775		tf->lbah = (block >> 16) & 0xff;
776		tf->lbam = (block >> 8) & 0xff;
777		tf->lbal = block & 0xff;
778
779		tf->device = ATA_LBA;
780		if (tf->flags & ATA_TFLAG_FUA)
781			tf->device |= 1 << 7;
782	} else if (dev->flags & ATA_DFLAG_LBA) {
783		tf->flags |= ATA_TFLAG_LBA;
784
785		if (lba_28_ok(block, n_block)) {
786			/* use LBA28 */
787			tf->device |= (block >> 24) & 0xf;
788		} else if (lba_48_ok(block, n_block)) {
789			if (!(dev->flags & ATA_DFLAG_LBA48))
790				return -ERANGE;
791
792			/* use LBA48 */
793			tf->flags |= ATA_TFLAG_LBA48;
794
795			tf->hob_nsect = (n_block >> 8) & 0xff;
796
797			tf->hob_lbah = (block >> 40) & 0xff;
798			tf->hob_lbam = (block >> 32) & 0xff;
799			tf->hob_lbal = (block >> 24) & 0xff;
800		} else
801			/* request too large even for LBA48 */
802			return -ERANGE;
803
804		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
805			return -EINVAL;
806
807		tf->nsect = n_block & 0xff;
808
809		tf->lbah = (block >> 16) & 0xff;
810		tf->lbam = (block >> 8) & 0xff;
811		tf->lbal = block & 0xff;
812
813		tf->device |= ATA_LBA;
814	} else {
815		/* CHS */
816		u32 sect, head, cyl, track;
817
818		/* The request -may- be too large for CHS addressing. */
819		if (!lba_28_ok(block, n_block))
820			return -ERANGE;
821
822		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
823			return -EINVAL;
824
825		/* Convert LBA to CHS */
826		track = (u32)block / dev->sectors;
827		cyl   = track / dev->heads;
828		head  = track % dev->heads;
829		sect  = (u32)block % dev->sectors + 1;
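		/* For illustration: with heads=16 and sectors=63, block 10000
		 * lands on track 158, i.e. cyl 9, head 14, sect 47.
		 */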
830
831		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
832			(u32)block, track, cyl, head, sect);
833
834		/* Check whether the converted CHS can fit.
835		   Cylinder: 0-65535
836		   Head: 0-15
837		   Sector: 1-255*/
838		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
839			return -ERANGE;
840
841		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
842		tf->lbal = sect;
843		tf->lbam = cyl;
844		tf->lbah = cyl >> 8;
845		tf->device |= head;
846	}
847
848	return 0;
849}
850
851/**
852 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
853 *	@pio_mask: pio_mask
854 *	@mwdma_mask: mwdma_mask
855 *	@udma_mask: udma_mask
856 *
857 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
858 *	unsigned int xfer_mask.
859 *
860 *	LOCKING:
861 *	None.
862 *
863 *	RETURNS:
864 *	Packed xfer_mask.
865 */
866unsigned long ata_pack_xfermask(unsigned long pio_mask,
867				unsigned long mwdma_mask,
868				unsigned long udma_mask)
869{
870	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
871		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
872		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
873}
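
/*
 * Each of the three masks is a bitmap indexed by mode number (e.g. a
 * pio_mask of 0x1f means PIO0-4); packing just shifts them into
 * non-overlapping bit ranges of a single value, and ata_unpack_xfermask()
 * below reverses it.
 */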
874
875/**
876 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
877 *	@xfer_mask: xfer_mask to unpack
878 *	@pio_mask: resulting pio_mask
879 *	@mwdma_mask: resulting mwdma_mask
880 *	@udma_mask: resulting udma_mask
881 *
882 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
884 */
885void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
886			 unsigned long *mwdma_mask, unsigned long *udma_mask)
887{
888	if (pio_mask)
889		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
890	if (mwdma_mask)
891		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
892	if (udma_mask)
893		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
894}
895
896static const struct ata_xfer_ent {
897	int shift, bits;
898	u8 base;
899} ata_xfer_tbl[] = {
900	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
901	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
902	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
903	{ -1, },
904};
905
906/**
907 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
908 *	@xfer_mask: xfer_mask of interest
909 *
910 *	Return matching XFER_* value for @xfer_mask.  Only the highest
911 *	bit of @xfer_mask is considered.
912 *
913 *	LOCKING:
914 *	None.
915 *
916 *	RETURNS:
917 *	Matching XFER_* value, 0xff if no match found.
918 */
919u8 ata_xfer_mask2mode(unsigned long xfer_mask)
920{
921	int highbit = fls(xfer_mask) - 1;
922	const struct ata_xfer_ent *ent;
923
924	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
925		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
926			return ent->base + highbit - ent->shift;
927	return 0xff;
928}
929
930/**
931 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
932 *	@xfer_mode: XFER_* of interest
933 *
934 *	Return matching xfer_mask for @xfer_mode.
935 *
936 *	LOCKING:
937 *	None.
938 *
939 *	RETURNS:
940 *	Matching xfer_mask, 0 if no match found.
941 */
942unsigned long ata_xfer_mode2mask(u8 xfer_mode)
943{
944	const struct ata_xfer_ent *ent;
945
946	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
947		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
948			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
949				& ~((1 << ent->shift) - 1);
950	return 0;
951}
952
953/**
954 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
955 *	@xfer_mode: XFER_* of interest
956 *
957 *	Return matching xfer_shift for @xfer_mode.
958 *
959 *	LOCKING:
960 *	None.
961 *
962 *	RETURNS:
963 *	Matching xfer_shift, -1 if no match found.
964 */
965int ata_xfer_mode2shift(unsigned long xfer_mode)
966{
967	const struct ata_xfer_ent *ent;
968
969	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
970		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
971			return ent->shift;
972	return -1;
973}
974
975/**
976 *	ata_mode_string - convert xfer_mask to string
977 *	@xfer_mask: mask of bits supported; only highest bit counts.
978 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
981 *
982 *	LOCKING:
983 *	None.
984 *
985 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
988 */
989const char *ata_mode_string(unsigned long xfer_mask)
990{
991	static const char * const xfer_mode_str[] = {
992		"PIO0",
993		"PIO1",
994		"PIO2",
995		"PIO3",
996		"PIO4",
997		"PIO5",
998		"PIO6",
999		"MWDMA0",
1000		"MWDMA1",
1001		"MWDMA2",
1002		"MWDMA3",
1003		"MWDMA4",
1004		"UDMA/16",
1005		"UDMA/25",
1006		"UDMA/33",
1007		"UDMA/44",
1008		"UDMA/66",
1009		"UDMA/100",
1010		"UDMA/133",
1011		"UDMA7",
1012	};
1013	int highbit;
1014
1015	highbit = fls(xfer_mask) - 1;
1016	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1017		return xfer_mode_str[highbit];
1018	return "<n/a>";
1019}
1020
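/*
 * @spd is the SATA SPD value as found in SStatus/SControl: 1 = 1.5 Gbps
 * (Gen1), 2 = 3.0 Gbps (Gen2), 3 = 6.0 Gbps (Gen3).
 */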
1021const char *sata_spd_string(unsigned int spd)
1022{
1023	static const char * const spd_str[] = {
1024		"1.5 Gbps",
1025		"3.0 Gbps",
1026		"6.0 Gbps",
1027	};
1028
1029	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1030		return "<unknown>";
1031	return spd_str[spd - 1];
1032}
1033
1034/**
1035 *	ata_dev_classify - determine device type based on ATA-spec signature
1036 *	@tf: ATA taskfile register set for device to be identified
1037 *
1038 *	Determine from taskfile register contents whether a device is
1039 *	ATA or ATAPI, as per "Signature and persistence" section
1040 *	of ATA/PI spec (volume 1, sect 5.14).
1041 *
1042 *	LOCKING:
1043 *	None.
1044 *
1045 *	RETURNS:
1046 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 *	%ATA_DEV_UNKNOWN in the event of failure.
1048 */
1049unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1050{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec never mentioned using different signatures for ATA/ATAPI
	 * devices.  Then, the Serial ATA II: Port Multiplier
	 * specification began to use 0x69/0x96 to identify port
	 * multipliers and 0x3c/0xc3 to identify SEMB devices.
	 * ATA/ATAPI-7 shortly afterwards dropped the descriptions of
	 * 0x3c/0xc3 and 0x69/0x96 and described them as reserved for
	 * SerialATA.
1065	 *
1066	 * We follow the current spec and consider that 0x69/0x96
1067	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1068	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1069	 * SEMB signature.  This is worked around in
1070	 * ata_dev_read_id().
1071	 */
1072	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1073		DPRINTK("found ATA device by sig\n");
1074		return ATA_DEV_ATA;
1075	}
1076
1077	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1078		DPRINTK("found ATAPI device by sig\n");
1079		return ATA_DEV_ATAPI;
1080	}
1081
1082	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1083		DPRINTK("found PMP device by sig\n");
1084		return ATA_DEV_PMP;
1085	}
1086
1087	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1088		DPRINTK("found SEMB device by sig (could be ATA device)\n");
1089		return ATA_DEV_SEMB;
1090	}
1091
1092	DPRINTK("unknown device\n");
1093	return ATA_DEV_UNKNOWN;
1094}
1095
1096/**
1097 *	ata_id_string - Convert IDENTIFY DEVICE page into string
1098 *	@id: IDENTIFY DEVICE results we will examine
1099 *	@s: string into which data is output
1100 *	@ofs: offset into identify device page
1101 *	@len: length of string to return. must be an even number.
1102 *
1103 *	The strings in the IDENTIFY DEVICE page are broken up into
1104 *	16-bit chunks.  Run through the string, and output each
1105 *	8-bit chunk linearly, regardless of platform.
1106 *
1107 *	LOCKING:
1108 *	caller.
1109 */
1110
1111void ata_id_string(const u16 *id, unsigned char *s,
1112		   unsigned int ofs, unsigned int len)
1113{
1114	unsigned int c;
1115
1116	BUG_ON(len & 1);
1117
1118	while (len > 0) {
1119		c = id[ofs] >> 8;
1120		*s = c;
1121		s++;
1122
1123		c = id[ofs] & 0xff;
1124		*s = c;
1125		s++;
1126
1127		ofs++;
1128		len -= 2;
1129	}
1130}
1131
1132/**
1133 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1134 *	@id: IDENTIFY DEVICE results we will examine
1135 *	@s: string into which data is output
1136 *	@ofs: offset into identify device page
1137 *	@len: length of string to return. must be an odd number.
1138 *
1139 *	This function is identical to ata_id_string except that it
1140 *	trims trailing spaces and terminates the resulting string with
1141 *	null.  @len must be actual maximum length (even number) + 1.
1142 *
1143 *	LOCKING:
1144 *	caller.
1145 */
1146void ata_id_c_string(const u16 *id, unsigned char *s,
1147		     unsigned int ofs, unsigned int len)
1148{
1149	unsigned char *p;
1150
1151	ata_id_string(id, s, ofs, len - 1);
1152
1153	p = s + strnlen(s, len - 1);
1154	while (p > s && p[-1] == ' ')
1155		p--;
1156	*p = '\0';
1157}
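
/*
 * Typical use, as done elsewhere in libata when printing device info:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */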
1158
1159static u64 ata_id_n_sectors(const u16 *id)
1160{
1161	if (ata_id_has_lba(id)) {
1162		if (ata_id_has_lba48(id))
1163			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1164		else
1165			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1166	} else {
1167		if (ata_id_current_chs_valid(id))
1168			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1169			       id[ATA_ID_CUR_SECTORS];
1170		else
1171			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1172			       id[ATA_ID_SECTORS];
1173	}
1174}
1175
1176u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1177{
1178	u64 sectors = 0;
1179
1180	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1181	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1182	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1183	sectors |= (tf->lbah & 0xff) << 16;
1184	sectors |= (tf->lbam & 0xff) << 8;
1185	sectors |= (tf->lbal & 0xff);
1186
1187	return sectors;
1188}
1189
1190u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1191{
1192	u64 sectors = 0;
1193
1194	sectors |= (tf->device & 0x0f) << 24;
1195	sectors |= (tf->lbah & 0xff) << 16;
1196	sectors |= (tf->lbam & 0xff) << 8;
1197	sectors |= (tf->lbal & 0xff);
1198
1199	return sectors;
1200}
1201
1202/**
1203 *	ata_read_native_max_address - Read native max address
1204 *	@dev: target device
1205 *	@max_sectors: out parameter for the result native max address
1206 *
1207 *	Perform an LBA48 or LBA28 native size query upon the device in
1208 *	question.
1209 *
1210 *	RETURNS:
1211 *	0 on success, -EACCES if command is aborted by the drive.
1212 *	-EIO on other errors.
1213 */
1214static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1215{
1216	unsigned int err_mask;
1217	struct ata_taskfile tf;
1218	int lba48 = ata_id_has_lba48(dev->id);
1219
1220	ata_tf_init(dev, &tf);
1221
1222	/* always clear all address registers */
1223	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1224
1225	if (lba48) {
1226		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1227		tf.flags |= ATA_TFLAG_LBA48;
1228	} else
1229		tf.command = ATA_CMD_READ_NATIVE_MAX;
1230
1231	tf.protocol |= ATA_PROT_NODATA;
1232	tf.device |= ATA_LBA;
1233
1234	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1235	if (err_mask) {
1236		ata_dev_warn(dev,
1237			     "failed to read native max address (err_mask=0x%x)\n",
1238			     err_mask);
1239		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1240			return -EACCES;
1241		return -EIO;
1242	}
1243
1244	if (lba48)
1245		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1246	else
1247		*max_sectors = ata_tf_to_lba(&tf) + 1;
1248	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1249		(*max_sectors)--;
1250	return 0;
1251}
1252
1253/**
1254 *	ata_set_max_sectors - Set max sectors
1255 *	@dev: target device
1256 *	@new_sectors: new max sectors value to set for the device
1257 *
1258 *	Set max sectors of @dev to @new_sectors.
1259 *
1260 *	RETURNS:
1261 *	0 on success, -EACCES if command is aborted or denied (due to
1262 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1263 *	errors.
1264 */
1265static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1266{
1267	unsigned int err_mask;
1268	struct ata_taskfile tf;
1269	int lba48 = ata_id_has_lba48(dev->id);
1270
1271	new_sectors--;
1272
1273	ata_tf_init(dev, &tf);
1274
1275	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1276
1277	if (lba48) {
1278		tf.command = ATA_CMD_SET_MAX_EXT;
1279		tf.flags |= ATA_TFLAG_LBA48;
1280
1281		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1282		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1283		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1284	} else {
1285		tf.command = ATA_CMD_SET_MAX;
1286
1287		tf.device |= (new_sectors >> 24) & 0xf;
1288	}
1289
1290	tf.protocol |= ATA_PROT_NODATA;
1291	tf.device |= ATA_LBA;
1292
1293	tf.lbal = (new_sectors >> 0) & 0xff;
1294	tf.lbam = (new_sectors >> 8) & 0xff;
1295	tf.lbah = (new_sectors >> 16) & 0xff;
1296
1297	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1298	if (err_mask) {
1299		ata_dev_warn(dev,
1300			     "failed to set max address (err_mask=0x%x)\n",
1301			     err_mask);
1302		if (err_mask == AC_ERR_DEV &&
1303		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1304			return -EACCES;
1305		return -EIO;
1306	}
1307
1308	return 0;
1309}
1310
1311/**
1312 *	ata_hpa_resize		-	Resize a device with an HPA set
1313 *	@dev: Device to resize
1314 *
1315 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1316 *	it if required to the full size of the media. The caller must check
1317 *	the drive has the HPA feature set enabled.
1318 *
1319 *	RETURNS:
1320 *	0 on success, -errno on failure.
1321 */
1322static int ata_hpa_resize(struct ata_device *dev)
1323{
1324	struct ata_eh_context *ehc = &dev->link->eh_context;
1325	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1326	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1327	u64 sectors = ata_id_n_sectors(dev->id);
1328	u64 native_sectors;
1329	int rc;
1330
1331	/* do we need to do it? */
1332	if (dev->class != ATA_DEV_ATA ||
1333	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1334	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1335		return 0;
1336
1337	/* read native max address */
1338	rc = ata_read_native_max_address(dev, &native_sectors);
1339	if (rc) {
1340		/* If device aborted the command or HPA isn't going to
1341		 * be unlocked, skip HPA resizing.
1342		 */
1343		if (rc == -EACCES || !unlock_hpa) {
1344			ata_dev_warn(dev,
1345				     "HPA support seems broken, skipping HPA handling\n");
1346			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1347
1348			/* we can continue if device aborted the command */
1349			if (rc == -EACCES)
1350				rc = 0;
1351		}
1352
1353		return rc;
1354	}
1355	dev->n_native_sectors = native_sectors;
1356
1357	/* nothing to do? */
1358	if (native_sectors <= sectors || !unlock_hpa) {
1359		if (!print_info || native_sectors == sectors)
1360			return 0;
1361
1362		if (native_sectors > sectors)
1363			ata_dev_info(dev,
1364				"HPA detected: current %llu, native %llu\n",
1365				(unsigned long long)sectors,
1366				(unsigned long long)native_sectors);
1367		else if (native_sectors < sectors)
1368			ata_dev_warn(dev,
1369				"native sectors (%llu) is smaller than sectors (%llu)\n",
1370				(unsigned long long)native_sectors,
1371				(unsigned long long)sectors);
1372		return 0;
1373	}
1374
1375	/* let's unlock HPA */
1376	rc = ata_set_max_sectors(dev, native_sectors);
1377	if (rc == -EACCES) {
1378		/* if device aborted the command, skip HPA resizing */
1379		ata_dev_warn(dev,
1380			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1381			     (unsigned long long)sectors,
1382			     (unsigned long long)native_sectors);
1383		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1384		return 0;
1385	} else if (rc)
1386		return rc;
1387
1388	/* re-read IDENTIFY data */
1389	rc = ata_dev_reread_id(dev, 0);
1390	if (rc) {
1391		ata_dev_err(dev,
1392			    "failed to re-read IDENTIFY data after HPA resizing\n");
1393		return rc;
1394	}
1395
1396	if (print_info) {
1397		u64 new_sectors = ata_id_n_sectors(dev->id);
1398		ata_dev_info(dev,
1399			"HPA unlocked: %llu -> %llu, native %llu\n",
1400			(unsigned long long)sectors,
1401			(unsigned long long)new_sectors,
1402			(unsigned long long)native_sectors);
1403	}
1404
1405	return 0;
1406}
1407
1408/**
1409 *	ata_dump_id - IDENTIFY DEVICE info debugging output
1410 *	@id: IDENTIFY DEVICE page to dump
1411 *
1412 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1413 *	page.
1414 *
1415 *	LOCKING:
1416 *	caller.
1417 */
1418
1419static inline void ata_dump_id(const u16 *id)
1420{
1421	DPRINTK("49==0x%04x  "
1422		"53==0x%04x  "
1423		"63==0x%04x  "
1424		"64==0x%04x  "
1425		"75==0x%04x  \n",
1426		id[49],
1427		id[53],
1428		id[63],
1429		id[64],
1430		id[75]);
1431	DPRINTK("80==0x%04x  "
1432		"81==0x%04x  "
1433		"82==0x%04x  "
1434		"83==0x%04x  "
1435		"84==0x%04x  \n",
1436		id[80],
1437		id[81],
1438		id[82],
1439		id[83],
1440		id[84]);
1441	DPRINTK("88==0x%04x  "
1442		"93==0x%04x\n",
1443		id[88],
1444		id[93]);
1445}
1446
1447/**
1448 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1449 *	@id: IDENTIFY data to compute xfer mask from
1450 *
1451 *	Compute the xfermask for this device. This is not as trivial
1452 *	as it seems if we must consider early devices correctly.
1453 *
1454 *	FIXME: pre IDE drive timing (do we care ?).
1455 *
1456 *	LOCKING:
1457 *	None.
1458 *
1459 *	RETURNS:
1460 *	Computed xfermask
1461 */
1462unsigned long ata_id_xfermask(const u16 *id)
1463{
1464	unsigned long pio_mask, mwdma_mask, udma_mask;
1465
	/* Usual case.  Word 53 bit 1 indicates that word 64 is valid */
1467	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1468		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1469		pio_mask <<= 3;
1470		pio_mask |= 0x7;
1471	} else {
1472		/* If word 64 isn't valid then Word 51 high byte holds
1473		 * the PIO timing number for the maximum. Turn it into
1474		 * a mask.
1475		 */
1476		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1477		if (mode < 5)	/* Valid PIO range */
1478			pio_mask = (2 << mode) - 1;
1479		else
1480			pio_mask = 1;
1481
		/* But wait.. there's more.  Design your standards by
		 * committee and you too can get a free iordy field to
		 * process.  However, it's the speeds, not the modes, that
		 * are supported...  Note that drivers using the timing API
		 * will get this right anyway.
		 */
1488	}
1489
1490	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1491
1492	if (ata_id_is_cfa(id)) {
1493		/*
1494		 *	Process compact flash extended modes
1495		 */
1496		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1497		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1498
1499		if (pio)
1500			pio_mask |= (1 << 5);
1501		if (pio > 1)
1502			pio_mask |= (1 << 6);
1503		if (dma)
1504			mwdma_mask |= (1 << 3);
1505		if (dma > 1)
1506			mwdma_mask |= (1 << 4);
1507	}
1508
1509	udma_mask = 0;
1510	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1511		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1512
1513	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1514}
1515
1516static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1517{
1518	struct completion *waiting = qc->private_data;
1519
1520	complete(waiting);
1521}
1522
1523/**
1524 *	ata_exec_internal_sg - execute libata internal command
1525 *	@dev: Device to which the command is sent
1526 *	@tf: Taskfile registers for the command and the result
1527 *	@cdb: CDB for packet command
1528 *	@dma_dir: Data transfer direction of the command
1529 *	@sgl: sg list for the data buffer of the command
1530 *	@n_elem: Number of sg entries
1531 *	@timeout: Timeout in msecs (0 for default)
1532 *
1533 *	Executes libata internal command with timeout.  @tf contains
1534 *	command on entry and result on return.  Timeout and error
1535 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after a timeout.
1538 *
1539 *	LOCKING:
1540 *	None.  Should be called with kernel context, might sleep.
1541 *
1542 *	RETURNS:
1543 *	Zero on success, AC_ERR_* mask on failure
1544 */
1545unsigned ata_exec_internal_sg(struct ata_device *dev,
1546			      struct ata_taskfile *tf, const u8 *cdb,
1547			      int dma_dir, struct scatterlist *sgl,
1548			      unsigned int n_elem, unsigned long timeout)
1549{
1550	struct ata_link *link = dev->link;
1551	struct ata_port *ap = link->ap;
1552	u8 command = tf->command;
1553	int auto_timeout = 0;
1554	struct ata_queued_cmd *qc;
1555	unsigned int tag, preempted_tag;
1556	u32 preempted_sactive, preempted_qc_active;
1557	int preempted_nr_active_links;
1558	DECLARE_COMPLETION_ONSTACK(wait);
1559	unsigned long flags;
1560	unsigned int err_mask;
1561	int rc;
1562
1563	spin_lock_irqsave(ap->lock, flags);
1564
1565	/* no internal command while frozen */
1566	if (ap->pflags & ATA_PFLAG_FROZEN) {
1567		spin_unlock_irqrestore(ap->lock, flags);
1568		return AC_ERR_SYSTEM;
1569	}
1570
1571	/* initialize internal qc */
1572
1573	/* XXX: Tag 0 is used for drivers with legacy EH as some
1574	 * drivers choke if any other tag is given.  This breaks
1575	 * ata_tag_internal() test for those drivers.  Don't use new
1576	 * EH stuff without converting to it.
1577	 */
1578	if (ap->ops->error_handler)
1579		tag = ATA_TAG_INTERNAL;
1580	else
1581		tag = 0;
1582
1583	if (test_and_set_bit(tag, &ap->qc_allocated))
1584		BUG();
1585	qc = __ata_qc_from_tag(ap, tag);
1586
1587	qc->tag = tag;
1588	qc->scsicmd = NULL;
1589	qc->ap = ap;
1590	qc->dev = dev;
1591	ata_qc_reinit(qc);
1592
1593	preempted_tag = link->active_tag;
1594	preempted_sactive = link->sactive;
1595	preempted_qc_active = ap->qc_active;
1596	preempted_nr_active_links = ap->nr_active_links;
1597	link->active_tag = ATA_TAG_POISON;
1598	link->sactive = 0;
1599	ap->qc_active = 0;
1600	ap->nr_active_links = 0;
1601
1602	/* prepare & issue qc */
1603	qc->tf = *tf;
1604	if (cdb)
1605		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1606
1607	/* some SATA bridges need us to indicate data xfer direction */
1608	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1609	    dma_dir == DMA_FROM_DEVICE)
1610		qc->tf.feature |= ATAPI_DMADIR;
1611
1612	qc->flags |= ATA_QCFLAG_RESULT_TF;
1613	qc->dma_dir = dma_dir;
1614	if (dma_dir != DMA_NONE) {
1615		unsigned int i, buflen = 0;
1616		struct scatterlist *sg;
1617
1618		for_each_sg(sgl, sg, n_elem, i)
1619			buflen += sg->length;
1620
1621		ata_sg_init(qc, sgl, n_elem);
1622		qc->nbytes = buflen;
1623	}
1624
1625	qc->private_data = &wait;
1626	qc->complete_fn = ata_qc_complete_internal;
1627
1628	ata_qc_issue(qc);
1629
1630	spin_unlock_irqrestore(ap->lock, flags);
1631
1632	if (!timeout) {
1633		if (ata_probe_timeout)
1634			timeout = ata_probe_timeout * 1000;
1635		else {
1636			timeout = ata_internal_cmd_timeout(dev, command);
1637			auto_timeout = 1;
1638		}
1639	}
1640
1641	if (ap->ops->error_handler)
1642		ata_eh_release(ap);
1643
1644	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1645
1646	if (ap->ops->error_handler)
1647		ata_eh_acquire(ap);
1648
1649	ata_sff_flush_pio_task(ap);
1650
1651	if (!rc) {
1652		spin_lock_irqsave(ap->lock, flags);
1653
1654		/* We're racing with irq here.  If we lose, the
1655		 * following test prevents us from completing the qc
1656		 * twice.  If we win, the port is frozen and will be
1657		 * cleaned up by ->post_internal_cmd().
1658		 */
1659		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1660			qc->err_mask |= AC_ERR_TIMEOUT;
1661
1662			if (ap->ops->error_handler)
1663				ata_port_freeze(ap);
1664			else
1665				ata_qc_complete(qc);
1666
1667			if (ata_msg_warn(ap))
1668				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1669					     command);
1670		}
1671
1672		spin_unlock_irqrestore(ap->lock, flags);
1673	}
1674
1675	/* do post_internal_cmd */
1676	if (ap->ops->post_internal_cmd)
1677		ap->ops->post_internal_cmd(qc);
1678
1679	/* perform minimal error analysis */
1680	if (qc->flags & ATA_QCFLAG_FAILED) {
1681		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1682			qc->err_mask |= AC_ERR_DEV;
1683
1684		if (!qc->err_mask)
1685			qc->err_mask |= AC_ERR_OTHER;
1686
1687		if (qc->err_mask & ~AC_ERR_OTHER)
1688			qc->err_mask &= ~AC_ERR_OTHER;
1689	}
1690
1691	/* finish up */
1692	spin_lock_irqsave(ap->lock, flags);
1693
1694	*tf = qc->result_tf;
1695	err_mask = qc->err_mask;
1696
1697	ata_qc_free(qc);
1698	link->active_tag = preempted_tag;
1699	link->sactive = preempted_sactive;
1700	ap->qc_active = preempted_qc_active;
1701	ap->nr_active_links = preempted_nr_active_links;
1702
1703	spin_unlock_irqrestore(ap->lock, flags);
1704
1705	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1706		ata_internal_cmd_timed_out(dev, command);
1707
1708	return err_mask;
1709}
1710
1711/**
1712 *	ata_exec_internal - execute libata internal command
1713 *	@dev: Device to which the command is sent
1714 *	@tf: Taskfile registers for the command and the result
1715 *	@cdb: CDB for packet command
1716 *	@dma_dir: Data transfer direction of the command
1717 *	@buf: Data buffer of the command
1718 *	@buflen: Length of data buffer
1719 *	@timeout: Timeout in msecs (0 for default)
1720 *
1721 *	Wrapper around ata_exec_internal_sg() which takes simple
1722 *	buffer instead of sg list.
1723 *
1724 *	LOCKING:
1725 *	None.  Should be called with kernel context, might sleep.
1726 *
1727 *	RETURNS:
1728 *	Zero on success, AC_ERR_* mask on failure
1729 */
1730unsigned ata_exec_internal(struct ata_device *dev,
1731			   struct ata_taskfile *tf, const u8 *cdb,
1732			   int dma_dir, void *buf, unsigned int buflen,
1733			   unsigned long timeout)
1734{
1735	struct scatterlist *psg = NULL, sg;
1736	unsigned int n_elem = 0;
1737
1738	if (dma_dir != DMA_NONE) {
1739		WARN_ON(!buf);
1740		sg_init_one(&sg, buf, buflen);
1741		psg = &sg;
1742		n_elem++;
1743	}
1744
1745	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1746				    timeout);
1747}
1748
1749/**
1750 *	ata_do_simple_cmd - execute simple internal command
1751 *	@dev: Device to which the command is sent
1752 *	@cmd: Opcode to execute
1753 *
1754 *	Execute a 'simple' command, that only consists of the opcode
1755 *	'cmd' itself, without filling any other registers
1756 *
1757 *	LOCKING:
1758 *	Kernel thread context (may sleep).
1759 *
1760 *	RETURNS:
1761 *	Zero on success, AC_ERR_* mask on failure
1762 */
1763unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1764{
1765	struct ata_taskfile tf;
1766
1767	ata_tf_init(dev, &tf);
1768
1769	tf.command = cmd;
1770	tf.flags |= ATA_TFLAG_DEVICE;
1771	tf.protocol = ATA_PROT_NODATA;
1772
1773	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1774}
1775
1776/**
1777 *	ata_pio_need_iordy	-	check if iordy needed
1778 *	@adev: ATA device
1779 *
1780 *	Check if the current speed of the device requires IORDY. Used
1781 *	by various controllers for chip configuration.
1782 */
1783unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1784{
1785	/* Don't set IORDY if we're preparing for reset.  IORDY may
1786	 * lead to controller lock up on certain controllers if the
1787	 * port is not occupied.  See bko#11703 for details.
1788	 */
1789	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1790		return 0;
1791	/* Controller doesn't support IORDY.  Probably a pointless
1792	 * check as the caller should know this.
1793	 */
1794	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1795		return 0;
1796	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
1797	if (ata_id_is_cfa(adev->id)
1798	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1799		return 0;
	/* For PIO3 and higher, IORDY is mandatory */
1801	if (adev->pio_mode > XFER_PIO_2)
1802		return 1;
1803	/* We turn it on when possible */
1804	if (ata_id_has_iordy(adev->id))
1805		return 1;
1806	return 0;
1807}
1808
1809/**
1810 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1811 *	@adev: ATA device
1812 *
1813 *	Compute the highest mode possible if we are not using iordy. Return
1814 *	-1 if no iordy mode is available.
1815 */
1816static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1817{
1818	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1819	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1820		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1821		/* Is the speed faster than the drive allows non IORDY ? */
1822		if (pio) {
1823			/* This is cycle times not frequency - watch the logic! */
1824			if (pio > 240)	/* PIO2 is 240nS per cycle */
1825				return 3 << ATA_SHIFT_PIO;
1826			return 7 << ATA_SHIFT_PIO;
1827		}
1828	}
1829	return 3 << ATA_SHIFT_PIO;
1830}
1831
1832/**
1833 *	ata_do_dev_read_id		-	default ID read method
1834 *	@dev: device
1835 *	@tf: proposed taskfile
1836 *	@id: data buffer
1837 *
1838 *	Issue the identify taskfile and hand back the buffer containing
 *	identify data.  For some RAID controllers and for pre-ATA devices
 *	this function is wrapped or replaced by the driver.
1841 */
1842unsigned int ata_do_dev_read_id(struct ata_device *dev,
1843					struct ata_taskfile *tf, u16 *id)
1844{
1845	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1846				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1847}
1848
1849/**
1850 *	ata_dev_read_id - Read ID data from the specified device
1851 *	@dev: target device
1852 *	@p_class: pointer to class of the target device (may be changed)
1853 *	@flags: ATA_READID_* flags
1854 *	@id: buffer to read IDENTIFY data into
1855 *
1856 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1857 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1858 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1859 *	for pre-ATA4 drives.
1860 *
1861 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1862 *	now we abort if we hit that case.
1863 *
1864 *	LOCKING:
1865 *	Kernel thread context (may sleep)
1866 *
1867 *	RETURNS:
1868 *	0 on success, -errno otherwise.
1869 */
1870int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1871		    unsigned int flags, u16 *id)
1872{
1873	struct ata_port *ap = dev->link->ap;
1874	unsigned int class = *p_class;
1875	struct ata_taskfile tf;
1876	unsigned int err_mask = 0;
1877	const char *reason;
1878	bool is_semb = class == ATA_DEV_SEMB;
1879	int may_fallback = 1, tried_spinup = 0;
1880	int rc;
1881
1882	if (ata_msg_ctl(ap))
1883		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1884
1885retry:
1886	ata_tf_init(dev, &tf);
1887
1888	switch (class) {
1889	case ATA_DEV_SEMB:
1890		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
1891	case ATA_DEV_ATA:
1892		tf.command = ATA_CMD_ID_ATA;
1893		break;
1894	case ATA_DEV_ATAPI:
1895		tf.command = ATA_CMD_ID_ATAPI;
1896		break;
1897	default:
1898		rc = -ENODEV;
1899		reason = "unsupported class";
1900		goto err_out;
1901	}
1902
1903	tf.protocol = ATA_PROT_PIO;
1904
1905	/* Some devices choke if TF registers contain garbage.  Make
1906	 * sure those are properly initialized.
1907	 */
1908	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1909
1910	/* Device presence detection is unreliable on some
1911	 * controllers.  Always poll IDENTIFY if available.
1912	 */
1913	tf.flags |= ATA_TFLAG_POLLING;
1914
1915	if (ap->ops->read_id)
1916		err_mask = ap->ops->read_id(dev, &tf, id);
1917	else
1918		err_mask = ata_do_dev_read_id(dev, &tf, id);
1919
1920	if (err_mask) {
1921		if (err_mask & AC_ERR_NODEV_HINT) {
1922			ata_dev_dbg(dev, "NODEV after polling detection\n");
1923			return -ENOENT;
1924		}
1925
1926		if (is_semb) {
1927			ata_dev_info(dev,
1928		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1929			/* SEMB is not supported yet */
1930			*p_class = ATA_DEV_SEMB_UNSUP;
1931			return 0;
1932		}
1933
1934		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1935			/* Device or controller might have reported
1936			 * the wrong device class.  Give a shot at the
1937			 * other IDENTIFY if the current one is
1938			 * aborted by the device.
1939			 */
1940			if (may_fallback) {
1941				may_fallback = 0;
1942
1943				if (class == ATA_DEV_ATA)
1944					class = ATA_DEV_ATAPI;
1945				else
1946					class = ATA_DEV_ATA;
1947				goto retry;
1948			}
1949
1950			/* Control reaches here iff the device aborted
1951			 * both flavors of IDENTIFYs which happens
1952			 * sometimes with phantom devices.
1953			 */
1954			ata_dev_dbg(dev,
1955				    "both IDENTIFYs aborted, assuming NODEV\n");
1956			return -ENOENT;
1957		}
1958
1959		rc = -EIO;
1960		reason = "I/O error";
1961		goto err_out;
1962	}
1963
1964	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1965		ata_dev_dbg(dev, "dumping IDENTIFY data, "
1966			    "class=%d may_fallback=%d tried_spinup=%d\n",
1967			    class, may_fallback, tried_spinup);
1968		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1969			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1970	}
1971
1972	/* Falling back doesn't make sense if ID data was read
1973	 * successfully at least once.
1974	 */
1975	may_fallback = 0;
1976
1977	swap_buf_le16(id, ATA_ID_WORDS);
1978
1979	/* sanity check */
1980	rc = -EINVAL;
1981	reason = "device reports invalid type";
1982
1983	if (class == ATA_DEV_ATA) {
1984		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1985			goto err_out;
1986		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1987							ata_id_is_ata(id)) {
1988			ata_dev_dbg(dev,
1989				"host indicates ignore ATA devices, ignored\n");
1990			return -ENOENT;
1991		}
1992	} else {
1993		if (ata_id_is_ata(id))
1994			goto err_out;
1995	}
1996
1997	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1998		tried_spinup = 1;
1999		/*
2000		 * Drive powered-up in standby mode, and requires a specific
2001		 * SET_FEATURES spin-up subcommand before it will accept
2002		 * anything other than the original IDENTIFY command.
2003		 */
2004		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2005		if (err_mask && id[2] != 0x738c) {
2006			rc = -EIO;
2007			reason = "SPINUP failed";
2008			goto err_out;
2009		}
2010		/*
2011		 * If the drive initially returned incomplete IDENTIFY info,
2012		 * we now must reissue the IDENTIFY command.
2013		 */
2014		if (id[2] == 0x37c8)
2015			goto retry;
2016	}
2017
2018	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2019		/*
2020		 * The exact sequence expected by certain pre-ATA4 drives is:
2021		 * SRST RESET
2022		 * IDENTIFY (optional in early ATA)
2023		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2024		 * anything else..
2025		 * Some drives were very specific about that exact sequence.
2026		 *
2027		 * Note that ATA4 says lba is mandatory so the second check
2028		 * should never trigger.
2029		 */
2030		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2031			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2032			if (err_mask) {
2033				rc = -EIO;
2034				reason = "INIT_DEV_PARAMS failed";
2035				goto err_out;
2036			}
2037
			/* current CHS translation info (id[53-58]) might have
			 * changed.  Reread the IDENTIFY data.
2040			 */
2041			flags &= ~ATA_READID_POSTRESET;
2042			goto retry;
2043		}
2044	}
2045
2046	*p_class = class;
2047
2048	return 0;
2049
2050 err_out:
2051	if (ata_msg_warn(ap))
2052		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2053			     reason, err_mask);
2054	return rc;
2055}
2056
2057static int ata_do_link_spd_horkage(struct ata_device *dev)
2058{
2059	struct ata_link *plink = ata_dev_phys_link(dev);
2060	u32 target, target_limit;
2061
2062	if (!sata_scr_valid(plink))
2063		return 0;
2064
2065	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2066		target = 1;
2067	else
2068		return 0;
2069
2070	target_limit = (1 << target) - 1;
2071
2072	/* if already on stricter limit, no need to push further */
2073	if (plink->sata_spd_limit <= target_limit)
2074		return 0;
2075
2076	plink->sata_spd_limit = target_limit;
2077
2078	/* Request another EH round by returning -EAGAIN if link is
2079	 * going faster than the target speed.  Forward progress is
2080	 * guaranteed by setting sata_spd_limit to target_limit above.
2081	 */
2082	if (plink->sata_spd > target) {
2083		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2084			     sata_spd_string(target));
2085		return -EAGAIN;
2086	}
2087	return 0;
2088}
2089
2090static inline u8 ata_dev_knobble(struct ata_device *dev)
2091{
2092	struct ata_port *ap = dev->link->ap;
2093
2094	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2095		return 0;
2096
2097	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2098}
2099
2100static int ata_dev_config_ncq(struct ata_device *dev,
2101			       char *desc, size_t desc_sz)
2102{
2103	struct ata_port *ap = dev->link->ap;
2104	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2105	unsigned int err_mask;
2106	char *aa_desc = "";
2107
2108	if (!ata_id_has_ncq(dev->id)) {
2109		desc[0] = '\0';
2110		return 0;
2111	}
2112	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2113		snprintf(desc, desc_sz, "NCQ (not used)");
2114		return 0;
2115	}
2116	if (ap->flags & ATA_FLAG_NCQ) {
2117		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2118		dev->flags |= ATA_DFLAG_NCQ;
2119	}
2120
2121	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2122		(ap->flags & ATA_FLAG_FPDMA_AA) &&
2123		ata_id_has_fpdma_aa(dev->id)) {
2124		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2125			SATA_FPDMA_AA);
2126		if (err_mask) {
2127			ata_dev_err(dev,
2128				    "failed to enable AA (error_mask=0x%x)\n",
2129				    err_mask);
2130			if (err_mask != AC_ERR_DEV) {
2131				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2132				return -EIO;
2133			}
2134		} else
2135			aa_desc = ", AA";
2136	}
2137
2138	if (hdepth >= ddepth)
2139		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2140	else
2141		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2142			ddepth, aa_desc);
2143
2144	if ((ap->flags & ATA_FLAG_FPDMA_AUX) &&
2145	    ata_id_has_ncq_send_and_recv(dev->id)) {
2146		err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2147					     0, ap->sector_buf, 1);
2148		if (err_mask) {
2149			ata_dev_dbg(dev,
2150				    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2151				    err_mask);
2152		} else {
2153			u8 *cmds = dev->ncq_send_recv_cmds;
2154
2155			dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2156			memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2157
2158			if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2159				ata_dev_dbg(dev, "disabling queued TRIM support\n");
2160				cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2161					~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2162			}
2163		}
2164	}
2165
2166	return 0;
2167}
2168
2169/**
2170 *	ata_dev_configure - Configure the specified ATA/ATAPI device
2171 *	@dev: Target device to configure
2172 *
2173 *	Configure @dev according to @dev->id.  Generic and low-level
2174 *	driver specific fixups are also applied.
2175 *
2176 *	LOCKING:
2177 *	Kernel thread context (may sleep)
2178 *
2179 *	RETURNS:
2180 *	0 on success, -errno otherwise
2181 */
2182int ata_dev_configure(struct ata_device *dev)
2183{
2184	struct ata_port *ap = dev->link->ap;
2185	struct ata_eh_context *ehc = &dev->link->eh_context;
2186	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2187	const u16 *id = dev->id;
2188	unsigned long xfer_mask;
2189	unsigned int err_mask;
2190	char revbuf[7];		/* XYZ-99\0 */
2191	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2192	char modelbuf[ATA_ID_PROD_LEN+1];
2193	int rc;
2194
2195	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2196		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2197		return 0;
2198	}
2199
2200	if (ata_msg_probe(ap))
2201		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2202
2203	/* set horkage */
2204	dev->horkage |= ata_dev_blacklisted(dev);
2205	ata_force_horkage(dev);
2206
2207	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2208		ata_dev_info(dev, "unsupported device, disabling\n");
2209		ata_dev_disable(dev);
2210		return 0;
2211	}
2212
2213	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2214	    dev->class == ATA_DEV_ATAPI) {
2215		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2216			     atapi_enabled ? "not supported with this driver"
2217			     : "disabled");
2218		ata_dev_disable(dev);
2219		return 0;
2220	}
2221
2222	rc = ata_do_link_spd_horkage(dev);
2223	if (rc)
2224		return rc;
2225
2226	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2227	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2228	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2229		dev->horkage |= ATA_HORKAGE_NOLPM;
2230
2231	if (dev->horkage & ATA_HORKAGE_NOLPM) {
2232		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2233		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2234	}
2235
2236	/* let ACPI work its magic */
2237	rc = ata_acpi_on_devcfg(dev);
2238	if (rc)
2239		return rc;
2240
2241	/* massage HPA, do it early as it might change IDENTIFY data */
2242	rc = ata_hpa_resize(dev);
2243	if (rc)
2244		return rc;
2245
2246	/* print device capabilities */
2247	if (ata_msg_probe(ap))
2248		ata_dev_dbg(dev,
2249			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2250			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
2251			    __func__,
2252			    id[49], id[82], id[83], id[84],
2253			    id[85], id[86], id[87], id[88]);
2254
2255	/* initialize to-be-configured parameters */
2256	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2257	dev->max_sectors = 0;
2258	dev->cdb_len = 0;
2259	dev->n_sectors = 0;
2260	dev->cylinders = 0;
2261	dev->heads = 0;
2262	dev->sectors = 0;
2263	dev->multi_count = 0;
2264
2265	/*
2266	 * common ATA, ATAPI feature tests
2267	 */
2268
2269	/* find max transfer mode; for printk only */
2270	xfer_mask = ata_id_xfermask(id);
2271
2272	if (ata_msg_probe(ap))
2273		ata_dump_id(id);
2274
2275	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2276	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2277			sizeof(fwrevbuf));
2278
2279	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2280			sizeof(modelbuf));
2281
2282	/* ATA-specific feature tests */
2283	if (dev->class == ATA_DEV_ATA) {
2284		if (ata_id_is_cfa(id)) {
2285			/* CPRM may make this media unusable */
2286			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2287				ata_dev_warn(dev,
2288	"supports DRM functions and may not be fully accessible\n");
2289			snprintf(revbuf, 7, "CFA");
2290		} else {
2291			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2292			/* Warn the user if the device has TPM extensions */
2293			if (ata_id_has_tpm(id))
2294				ata_dev_warn(dev,
2295	"supports DRM functions and may not be fully accessible\n");
2296		}
2297
2298		dev->n_sectors = ata_id_n_sectors(id);
2299
2300		/* get current R/W Multiple count setting */
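		/* e.g. id[47] == 0x8010: up to 16 sectors per interrupt;
		 * id[59] == 0x0108: current setting of 8 is valid, so
		 * multi_count becomes 8.
		 */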
2301		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2302			unsigned int max = dev->id[47] & 0xff;
2303			unsigned int cnt = dev->id[59] & 0xff;
2304			/* only recognize/allow powers of two here */
2305			if (is_power_of_2(max) && is_power_of_2(cnt))
2306				if (cnt <= max)
2307					dev->multi_count = cnt;
2308		}
2309
2310		if (ata_id_has_lba(id)) {
2311			const char *lba_desc;
2312			char ncq_desc[24];
2313
2314			lba_desc = "LBA";
2315			dev->flags |= ATA_DFLAG_LBA;
2316			if (ata_id_has_lba48(id)) {
2317				dev->flags |= ATA_DFLAG_LBA48;
2318				lba_desc = "LBA48";
2319
2320				if (dev->n_sectors >= (1UL << 28) &&
2321				    ata_id_has_flush_ext(id))
2322					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2323			}
2324
2325			/* config NCQ */
2326			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2327			if (rc)
2328				return rc;
2329
2330			/* print device info to dmesg */
2331			if (ata_msg_drv(ap) && print_info) {
2332				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2333					     revbuf, modelbuf, fwrevbuf,
2334					     ata_mode_string(xfer_mask));
2335				ata_dev_info(dev,
2336					     "%llu sectors, multi %u: %s %s\n",
2337					(unsigned long long)dev->n_sectors,
2338					dev->multi_count, lba_desc, ncq_desc);
2339			}
2340		} else {
2341			/* CHS */
2342
2343			/* Default translation */
2344			dev->cylinders	= id[1];
2345			dev->heads	= id[3];
2346			dev->sectors	= id[6];
2347
2348			if (ata_id_current_chs_valid(id)) {
2349				/* Current CHS translation is valid. */
2350				dev->cylinders = id[54];
2351				dev->heads     = id[55];
2352				dev->sectors   = id[56];
2353			}
2354
2355			/* print device info to dmesg */
2356			if (ata_msg_drv(ap) && print_info) {
2357				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2358					     revbuf,	modelbuf, fwrevbuf,
2359					     ata_mode_string(xfer_mask));
2360				ata_dev_info(dev,
2361					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
2362					     (unsigned long long)dev->n_sectors,
2363					     dev->multi_count, dev->cylinders,
2364					     dev->heads, dev->sectors);
2365			}
2366		}
2367
2368		/* Check and mark DevSlp capability. Get DevSlp timing variables
2369		 * from SATA Settings page of Identify Device Data Log.
2370		 */
2371		if (ata_id_has_devslp(dev->id)) {
2372			u8 *sata_setting = ap->sector_buf;
2373			int i, j;
2374
2375			dev->flags |= ATA_DFLAG_DEVSLP;
2376			err_mask = ata_read_log_page(dev,
2377						     ATA_LOG_SATA_ID_DEV_DATA,
2378						     ATA_LOG_SATA_SETTINGS,
2379						     sata_setting,
2380						     1);
2381			if (err_mask)
2382				ata_dev_dbg(dev,
2383					    "failed to get Identify Device Data, Emask 0x%x\n",
2384					    err_mask);
2385			else
2386				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2387					j = ATA_LOG_DEVSLP_OFFSET + i;
2388					dev->devslp_timing[i] = sata_setting[j];
2389				}
2390		}
2391
2392		dev->cdb_len = 16;
2393	}
2394
2395	/* ATAPI-specific feature tests */
2396	else if (dev->class == ATA_DEV_ATAPI) {
2397		const char *cdb_intr_string = "";
2398		const char *atapi_an_string = "";
2399		const char *dma_dir_string = "";
2400		u32 sntf;
2401
2402		rc = atapi_cdb_len(id);
2403		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2404			if (ata_msg_warn(ap))
2405				ata_dev_warn(dev, "unsupported CDB len\n");
2406			rc = -EINVAL;
2407			goto err_out_nosup;
2408		}
2409		dev->cdb_len = (unsigned int) rc;
2410
2411		/* Enable ATAPI AN if both the host and device have
2412		 * the support.  If PMP is attached, SNTF is required
2413		 * to enable ATAPI AN to discern between PHY status
2414		 * changed notifications and ATAPI ANs.
2415		 */
2416		if (atapi_an &&
2417		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2418		    (!sata_pmp_attached(ap) ||
2419		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2420			/* issue SET feature command to turn this on */
2421			err_mask = ata_dev_set_feature(dev,
2422					SETFEATURES_SATA_ENABLE, SATA_AN);
2423			if (err_mask)
2424				ata_dev_err(dev,
2425					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
2426					    err_mask);
2427			else {
2428				dev->flags |= ATA_DFLAG_AN;
2429				atapi_an_string = ", ATAPI AN";
2430			}
2431		}
2432
2433		if (ata_id_cdb_intr(dev->id)) {
2434			dev->flags |= ATA_DFLAG_CDB_INTR;
2435			cdb_intr_string = ", CDB intr";
2436		}
2437
2438		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2439			dev->flags |= ATA_DFLAG_DMADIR;
2440			dma_dir_string = ", DMADIR";
2441		}
2442
2443		if (ata_id_has_da(dev->id)) {
2444			dev->flags |= ATA_DFLAG_DA;
2445			zpodd_init(dev);
2446		}
2447
2448		/* print device info to dmesg */
2449		if (ata_msg_drv(ap) && print_info)
2450			ata_dev_info(dev,
2451				     "ATAPI: %s, %s, max %s%s%s%s\n",
2452				     modelbuf, fwrevbuf,
2453				     ata_mode_string(xfer_mask),
2454				     cdb_intr_string, atapi_an_string,
2455				     dma_dir_string);
2456	}
2457
2458	/* determine max_sectors */
2459	dev->max_sectors = ATA_MAX_SECTORS;
2460	if (dev->flags & ATA_DFLAG_LBA48)
2461		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2462
	/* Limit transfers for PATA drives behind a SATA cable bridge
	   to UDMA5 and 200 sectors */
2465	if (ata_dev_knobble(dev)) {
2466		if (ata_msg_drv(ap) && print_info)
2467			ata_dev_info(dev, "applying bridge limits\n");
2468		dev->udma_mask &= ATA_UDMA5;
2469		dev->max_sectors = ATA_MAX_SECTORS;
2470	}
2471
2472	if ((dev->class == ATA_DEV_ATAPI) &&
2473	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2474		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2475		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2476	}
2477
2478	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2479		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2480					 dev->max_sectors);
2481
2482	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2483		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2484
2485	if (ap->ops->dev_config)
2486		ap->ops->dev_config(dev);
2487
2488	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2489		/* Let the user know. We don't want to disallow opens for
2490		   rescue purposes, or in case the vendor is just a blithering
2491		   idiot. Do this after the dev_config call as some controllers
2492		   with buggy firmware may want to avoid reporting false device
2493		   bugs */
2494
2495		if (print_info) {
2496			ata_dev_warn(dev,
2497"Drive reports diagnostics failure. This may indicate a drive\n");
2498			ata_dev_warn(dev,
2499"fault or invalid emulation. Contact drive vendor for information.\n");
2500		}
2501	}
2502
2503	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2504		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2505		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
2506	}
2507
2508	return 0;
2509
2510err_out_nosup:
2511	if (ata_msg_probe(ap))
2512		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2513	return rc;
2514}
2515
2516/**
2517 *	ata_cable_40wire	-	return 40 wire cable type
2518 *	@ap: port
2519 *
2520 *	Helper method for drivers which want to hardwire 40 wire cable
2521 *	detection.
2522 */
2523
2524int ata_cable_40wire(struct ata_port *ap)
2525{
2526	return ATA_CBL_PATA40;
2527}
2528
2529/**
2530 *	ata_cable_80wire	-	return 80 wire cable type
2531 *	@ap: port
2532 *
2533 *	Helper method for drivers which want to hardwire 80 wire cable
2534 *	detection.
2535 */
2536
2537int ata_cable_80wire(struct ata_port *ap)
2538{
2539	return ATA_CBL_PATA80;
2540}
2541
2542/**
2543 *	ata_cable_unknown	-	return unknown PATA cable.
2544 *	@ap: port
2545 *
2546 *	Helper method for drivers which have no PATA cable detection.
2547 */
2548
2549int ata_cable_unknown(struct ata_port *ap)
2550{
2551	return ATA_CBL_PATA_UNK;
2552}
2553
2554/**
2555 *	ata_cable_ignore	-	return ignored PATA cable.
2556 *	@ap: port
2557 *
2558 *	Helper method for drivers which don't use cable type to limit
2559 *	transfer mode.
2560 */
2561int ata_cable_ignore(struct ata_port *ap)
2562{
2563	return ATA_CBL_PATA_IGN;
2564}
2565
2566/**
2567 *	ata_cable_sata	-	return SATA cable type
2568 *	@ap: port
2569 *
2570 *	Helper method for drivers which have SATA cables
2571 */
2572
2573int ata_cable_sata(struct ata_port *ap)
2574{
2575	return ATA_CBL_SATA;
2576}
2577
2578/**
2579 *	ata_bus_probe - Reset and probe ATA bus
2580 *	@ap: Bus to probe
2581 *
2582 *	Master ATA bus probing function.  Initiates a hardware-dependent
2583 *	bus reset, then attempts to identify any devices found on
2584 *	the bus.
2585 *
2586 *	LOCKING:
2587 *	PCI/etc. bus probe sem.
2588 *
2589 *	RETURNS:
2590 *	Zero on success, negative errno otherwise.
2591 */
2592
2593int ata_bus_probe(struct ata_port *ap)
2594{
2595	unsigned int classes[ATA_MAX_DEVICES];
2596	int tries[ATA_MAX_DEVICES];
2597	int rc;
2598	struct ata_device *dev;
2599
2600	ata_for_each_dev(dev, &ap->link, ALL)
2601		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2602
2603 retry:
2604	ata_for_each_dev(dev, &ap->link, ALL) {
2605		/* If we issue an SRST then an ATA drive (not ATAPI)
2606		 * may change configuration and be in PIO0 timing. If
2607		 * we do a hard reset (or are coming from power on)
2608		 * this is true for ATA or ATAPI. Until we've set a
2609		 * suitable controller mode we should not touch the
2610		 * bus as we may be talking too fast.
2611		 */
2612		dev->pio_mode = XFER_PIO_0;
2613		dev->dma_mode = 0xff;
2614
2615		/* If the controller has a pio mode setup function
2616		 * then use it to set the chipset to rights. Don't
2617		 * touch the DMA setup as that will be dealt with when
2618		 * configuring devices.
2619		 */
2620		if (ap->ops->set_piomode)
2621			ap->ops->set_piomode(ap, dev);
2622	}
2623
2624	/* reset and determine device classes */
2625	ap->ops->phy_reset(ap);
2626
2627	ata_for_each_dev(dev, &ap->link, ALL) {
2628		if (dev->class != ATA_DEV_UNKNOWN)
2629			classes[dev->devno] = dev->class;
2630		else
2631			classes[dev->devno] = ATA_DEV_NONE;
2632
2633		dev->class = ATA_DEV_UNKNOWN;
2634	}
2635
2636	/* read IDENTIFY page and configure devices. We have to do the identify
2637	   specific sequence bass-ackwards so that PDIAG- is released by
2638	   the slave device */
2639
2640	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2641		if (tries[dev->devno])
2642			dev->class = classes[dev->devno];
2643
2644		if (!ata_dev_enabled(dev))
2645			continue;
2646
2647		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2648				     dev->id);
2649		if (rc)
2650			goto fail;
2651	}
2652
2653	/* Now ask for the cable type as PDIAG- should have been released */
2654	if (ap->ops->cable_detect)
2655		ap->cbl = ap->ops->cable_detect(ap);
2656
2657	/* We may have SATA bridge glue hiding here irrespective of
2658	 * the reported cable types and sensed types.  When SATA
2659	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is on, which is a problem.
2661	 */
2662	ata_for_each_dev(dev, &ap->link, ENABLED)
2663		if (ata_id_is_sata(dev->id))
2664			ap->cbl = ATA_CBL_SATA;
2665
2666	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused. */
2668
2669	ata_for_each_dev(dev, &ap->link, ENABLED) {
2670		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2671		rc = ata_dev_configure(dev);
2672		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2673		if (rc)
2674			goto fail;
2675	}
2676
2677	/* configure transfer mode */
2678	rc = ata_set_mode(&ap->link, &dev);
2679	if (rc)
2680		goto fail;
2681
2682	ata_for_each_dev(dev, &ap->link, ENABLED)
2683		return 0;
2684
2685	return -ENODEV;
2686
2687 fail:
2688	tries[dev->devno]--;
2689
2690	switch (rc) {
2691	case -EINVAL:
2692		/* eeek, something went very wrong, give up */
2693		tries[dev->devno] = 0;
2694		break;
2695
2696	case -ENODEV:
2697		/* give it just one more chance */
2698		tries[dev->devno] = min(tries[dev->devno], 1);
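		/* fallthrough */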
2699	case -EIO:
2700		if (tries[dev->devno] == 1) {
2701			/* This is the last chance, better to slow
2702			 * down than lose it.
2703			 */
2704			sata_down_spd_limit(&ap->link, 0);
2705			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2706		}
2707	}
2708
2709	if (!tries[dev->devno])
2710		ata_dev_disable(dev);
2711
2712	goto retry;
2713}
2714
2715/**
2716 *	sata_print_link_status - Print SATA link status
2717 *	@link: SATA link to printk link status about
2718 *
2719 *	This function prints link speed and status of a SATA link.
2720 *
2721 *	LOCKING:
2722 *	None.
2723 */
2724static void sata_print_link_status(struct ata_link *link)
2725{
2726	u32 sstatus, scontrol, tmp;
2727
2728	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2729		return;
2730	sata_scr_read(link, SCR_CONTROL, &scontrol);
2731
2732	if (ata_phys_link_online(link)) {
2733		tmp = (sstatus >> 4) & 0xf;
2734		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2735			      sata_spd_string(tmp), sstatus, scontrol);
2736	} else {
2737		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2738			      sstatus, scontrol);
2739	}
2740}
2741
2742/**
2743 *	ata_dev_pair		-	return other device on cable
2744 *	@adev: device
2745 *
2746 *	Obtain the other device on the same cable, or if none is
2747 *	present NULL is returned
2748 */
2749
2750struct ata_device *ata_dev_pair(struct ata_device *adev)
2751{
2752	struct ata_link *link = adev->link;
2753	struct ata_device *pair = &link->device[1 - adev->devno];
2754	if (!ata_dev_enabled(pair))
2755		return NULL;
2756	return pair;
2757}
2758
2759/**
2760 *	sata_down_spd_limit - adjust SATA spd limit downward
2761 *	@link: Link to adjust SATA spd limit for
2762 *	@spd_limit: Additional limit
2763 *
2764 *	Adjust SATA spd limit of @link downward.  Note that this
2765 *	function only adjusts the limit.  The change must be applied
2766 *	using sata_set_spd().
2767 *
2768 *	If @spd_limit is non-zero, the speed is limited to equal to or
2769 *	lower than @spd_limit if such speed is supported.  If
2770 *	@spd_limit is slower than any supported speed, only the lowest
2771 *	supported speed is allowed.
2772 *
2773 *	LOCKING:
2774 *	Inherited from caller.
2775 *
2776 *	RETURNS:
2777 *	0 on success, negative errno on failure
2778 */
2779int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2780{
2781	u32 sstatus, spd, mask;
2782	int rc, bit;
2783
2784	if (!sata_scr_valid(link))
2785		return -EOPNOTSUPP;
2786
2787	/* If SCR can be read, use it to determine the current SPD.
2788	 * If not, use cached value in link->sata_spd.
2789	 */
2790	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2791	if (rc == 0 && ata_sstatus_online(sstatus))
2792		spd = (sstatus >> 4) & 0xf;
2793	else
2794		spd = link->sata_spd;
2795
2796	mask = link->sata_spd_limit;
2797	if (mask <= 1)
2798		return -EINVAL;
2799
2800	/* unconditionally mask off the highest bit */
2801	bit = fls(mask) - 1;
2802	mask &= ~(1 << bit);
2803
2804	/* Mask off all speeds higher than or equal to the current
2805	 * one.  Force 1.5Gbps if current SPD is not available.
2806	 */
2807	if (spd > 1)
2808		mask &= (1 << (spd - 1)) - 1;
2809	else
2810		mask &= 1;
2811
2812	/* were we already at the bottom? */
2813	if (!mask)
2814		return -EINVAL;
2815
2816	if (spd_limit) {
2817		if (mask & ((1 << spd_limit) - 1))
2818			mask &= (1 << spd_limit) - 1;
2819		else {
2820			bit = ffs(mask) - 1;
2821			mask = 1 << bit;
2822		}
2823	}
2824
2825	link->sata_spd_limit = mask;
2826
2827	ata_link_warn(link, "limiting SATA link speed to %s\n",
2828		      sata_spd_string(fls(mask)));
2829
2830	return 0;
2831}
2832
2833static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2834{
2835	struct ata_link *host_link = &link->ap->link;
2836	u32 limit, target, spd;
2837
2838	limit = link->sata_spd_limit;
2839
2840	/* Don't configure downstream link faster than upstream link.
2841	 * It doesn't speed up anything and some PMPs choke on such
2842	 * configuration.
2843	 */
2844	if (!ata_is_host_link(link) && host_link->sata_spd)
2845		limit &= (1 << host_link->sata_spd) - 1;
2846
2847	if (limit == UINT_MAX)
2848		target = 0;
2849	else
2850		target = fls(limit);
2851
2852	spd = (*scontrol >> 4) & 0xf;
2853	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2854
2855	return spd != target;
2856}
2857
2858/**
2859 *	sata_set_spd_needed - is SATA spd configuration needed
2860 *	@link: Link in question
2861 *
2862 *	Test whether the spd limit in SControl matches
2863 *	@link->sata_spd_limit.  This function is used to determine
2864 *	whether hardreset is necessary to apply SATA spd
2865 *	configuration.
2866 *
2867 *	LOCKING:
2868 *	Inherited from caller.
2869 *
2870 *	RETURNS:
2871 *	1 if SATA spd configuration is needed, 0 otherwise.
2872 */
2873static int sata_set_spd_needed(struct ata_link *link)
2874{
2875	u32 scontrol;
2876
2877	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2878		return 1;
2879
2880	return __sata_set_spd_needed(link, &scontrol);
2881}
2882
2883/**
2884 *	sata_set_spd - set SATA spd according to spd limit
2885 *	@link: Link to set SATA spd for
2886 *
2887 *	Set SATA spd of @link according to sata_spd_limit.
2888 *
2889 *	LOCKING:
2890 *	Inherited from caller.
2891 *
2892 *	RETURNS:
2893 *	0 if spd doesn't need to be changed, 1 if spd has been
2894 *	changed.  Negative errno if SCR registers are inaccessible.
2895 */
2896int sata_set_spd(struct ata_link *link)
2897{
2898	u32 scontrol;
2899	int rc;
2900
2901	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2902		return rc;
2903
2904	if (!__sata_set_spd_needed(link, &scontrol))
2905		return 0;
2906
2907	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2908		return rc;
2909
2910	return 1;
2911}
2912
2913/*
2914 * This mode timing computation functionality is ported over from
2915 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2916 */
2917/*
2918 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2919 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2920 * for UDMA6, which is currently supported only by Maxtor drives.
2921 *
2922 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2923 */
2924
2925static const struct ata_timing ata_timing[] = {
2926/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
2927	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
2928	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
2929	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
2930	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
2931	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
2932	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
2933	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
2934
2935	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
2936	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
2937	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
2938
2939	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
2940	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
2941	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
2942	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
2943	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
2944
2945/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
2946	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
2947	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
2948	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
2949	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
2950	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
2951	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
2952	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
2953
2954	{ 0xFF }
2955};
2956
2957#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2958#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
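
/*
 * ENOUGH() rounds @v up to a whole number of @unit periods (ceiling
 * division), e.g. ENOUGH(25, 30) == 1 and ENOUGH(31, 30) == 2.  EZ()
 * additionally maps zero to zero so that unused timing fields stay 0.
 */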
2959
2960static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2961{
2962	q->setup	= EZ(t->setup      * 1000,  T);
2963	q->act8b	= EZ(t->act8b      * 1000,  T);
2964	q->rec8b	= EZ(t->rec8b      * 1000,  T);
2965	q->cyc8b	= EZ(t->cyc8b      * 1000,  T);
2966	q->active	= EZ(t->active     * 1000,  T);
2967	q->recover	= EZ(t->recover    * 1000,  T);
2968	q->dmack_hold	= EZ(t->dmack_hold * 1000,  T);
2969	q->cycle	= EZ(t->cycle      * 1000,  T);
2970	q->udma		= EZ(t->udma       * 1000, UT);
2971}
2972
2973void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2974		      struct ata_timing *m, unsigned int what)
2975{
2976	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2977	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2978	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2979	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2980	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2981	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2982	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
2983	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2984	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2985}
2986
2987const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2988{
2989	const struct ata_timing *t = ata_timing;
2990
2991	while (xfer_mode > t->mode)
2992		t++;
2993
2994	if (xfer_mode == t->mode)
2995		return t;
2996
2997	WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
2998			__func__, xfer_mode);
2999
3000	return NULL;
3001}
3002
3003int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3004		       struct ata_timing *t, int T, int UT)
3005{
3006	const u16 *id = adev->id;
3007	const struct ata_timing *s;
3008	struct ata_timing p;
3009
3010	/*
3011	 * Find the mode.
3012	 */
3013
3014	if (!(s = ata_timing_find_mode(speed)))
3015		return -EINVAL;
3016
3017	memcpy(t, s, sizeof(*s));
3018
3019	/*
3020	 * If the drive is an EIDE drive, it can tell us it needs extended
3021	 * PIO/MW_DMA cycle timing.
3022	 */
3023
3024	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
3025		memset(&p, 0, sizeof(p));
3026
3027		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
3028			if (speed <= XFER_PIO_2)
3029				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3030			else if ((speed <= XFER_PIO_4) ||
3031				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3032				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3033		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3034			p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3035
3036		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3037	}
3038
3039	/*
3040	 * Convert the timing to bus clock counts.
3041	 */
3042
3043	ata_timing_quantize(t, t, T, UT);
3044
3045	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3049	 */
3050
3051	if (speed > XFER_PIO_6) {
3052		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3053		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3054	}
3055
3056	/*
3057	 * Lengthen active & recovery time so that cycle time is correct.
3058	 */
3059
3060	if (t->act8b + t->rec8b < t->cyc8b) {
3061		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3062		t->rec8b = t->cyc8b - t->act8b;
3063	}
3064
3065	if (t->active + t->recover < t->cycle) {
3066		t->active += (t->cycle - (t->active + t->recover)) / 2;
3067		t->recover = t->cycle - t->active;
3068	}
3069
	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery;
	   if so, we must correct this. */
3073	if (t->active + t->recover > t->cycle)
3074		t->cycle = t->active + t->recover;
3075
3076	return 0;
3077}
3078
3079/**
3080 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3081 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3082 *	@cycle: cycle duration in ns
3083 *
3084 *	Return matching xfer mode for @cycle.  The returned mode is of
3085 *	the transfer type specified by @xfer_shift.  If @cycle is too
3086 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3087 *	than the fastest known mode, the fasted mode is returned.
3088 *
3089 *	LOCKING:
3090 *	None.
3091 *
3092 *	RETURNS:
3093 *	Matching xfer_mode, 0xff if no match found.
3094 */
3095u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3096{
3097	u8 base_mode = 0xff, last_mode = 0xff;
3098	const struct ata_xfer_ent *ent;
3099	const struct ata_timing *t;
3100
3101	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3102		if (ent->shift == xfer_shift)
3103			base_mode = ent->base;
3104
3105	for (t = ata_timing_find_mode(base_mode);
3106	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3107		unsigned short this_cycle;
3108
3109		switch (xfer_shift) {
3110		case ATA_SHIFT_PIO:
3111		case ATA_SHIFT_MWDMA:
3112			this_cycle = t->cycle;
3113			break;
3114		case ATA_SHIFT_UDMA:
3115			this_cycle = t->udma;
3116			break;
3117		default:
3118			return 0xff;
3119		}
3120
3121		if (cycle > this_cycle)
3122			break;
3123
3124		last_mode = t->mode;
3125	}
3126
3127	return last_mode;
3128}
3129
3130/**
3131 *	ata_down_xfermask_limit - adjust dev xfer masks downward
3132 *	@dev: Device to adjust xfer masks
3133 *	@sel: ATA_DNXFER_* selector
3134 *
3135 *	Adjust xfer masks of @dev downward.  Note that this function
3136 *	does not apply the change.  Invoking ata_set_mode() afterwards
3137 *	will apply the limit.
3138 *
3139 *	LOCKING:
3140 *	Inherited from caller.
3141 *
3142 *	RETURNS:
3143 *	0 on success, negative errno on failure
3144 */
3145int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3146{
3147	char buf[32];
3148	unsigned long orig_mask, xfer_mask;
3149	unsigned long pio_mask, mwdma_mask, udma_mask;
3150	int quiet, highbit;
3151
3152	quiet = !!(sel & ATA_DNXFER_QUIET);
3153	sel &= ~ATA_DNXFER_QUIET;
3154
3155	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3156						  dev->mwdma_mask,
3157						  dev->udma_mask);
3158	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3159
3160	switch (sel) {
3161	case ATA_DNXFER_PIO:
3162		highbit = fls(pio_mask) - 1;
3163		pio_mask &= ~(1 << highbit);
3164		break;
3165
3166	case ATA_DNXFER_DMA:
3167		if (udma_mask) {
3168			highbit = fls(udma_mask) - 1;
3169			udma_mask &= ~(1 << highbit);
3170			if (!udma_mask)
3171				return -ENOENT;
3172		} else if (mwdma_mask) {
3173			highbit = fls(mwdma_mask) - 1;
3174			mwdma_mask &= ~(1 << highbit);
3175			if (!mwdma_mask)
3176				return -ENOENT;
3177		}
3178		break;
3179
3180	case ATA_DNXFER_40C:
3181		udma_mask &= ATA_UDMA_MASK_40C;
3182		break;
3183
3184	case ATA_DNXFER_FORCE_PIO0:
3185		pio_mask &= 1;
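		/* fallthrough */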
3186	case ATA_DNXFER_FORCE_PIO:
3187		mwdma_mask = 0;
3188		udma_mask = 0;
3189		break;
3190
3191	default:
3192		BUG();
3193	}
3194
3195	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3196
3197	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3198		return -ENOENT;
3199
3200	if (!quiet) {
3201		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3202			snprintf(buf, sizeof(buf), "%s:%s",
3203				 ata_mode_string(xfer_mask),
3204				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3205		else
3206			snprintf(buf, sizeof(buf), "%s",
3207				 ata_mode_string(xfer_mask));
3208
3209		ata_dev_warn(dev, "limiting speed to %s\n", buf);
3210	}
3211
3212	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3213			    &dev->udma_mask);
3214
3215	return 0;
3216}
3217
3218static int ata_dev_set_mode(struct ata_device *dev)
3219{
3220	struct ata_port *ap = dev->link->ap;
3221	struct ata_eh_context *ehc = &dev->link->eh_context;
3222	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3223	const char *dev_err_whine = "";
3224	int ign_dev_err = 0;
3225	unsigned int err_mask = 0;
3226	int rc;
3227
3228	dev->flags &= ~ATA_DFLAG_PIO;
3229	if (dev->xfer_shift == ATA_SHIFT_PIO)
3230		dev->flags |= ATA_DFLAG_PIO;
3231
3232	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3233		dev_err_whine = " (SET_XFERMODE skipped)";
3234	else {
3235		if (nosetxfer)
3236			ata_dev_warn(dev,
3237				     "NOSETXFER but PATA detected - can't "
3238				     "skip SETXFER, might malfunction\n");
3239		err_mask = ata_dev_set_xfermode(dev);
3240	}
3241
3242	if (err_mask & ~AC_ERR_DEV)
3243		goto fail;
3244
3245	/* revalidate */
3246	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3247	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3248	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3249	if (rc)
3250		return rc;
3251
3252	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3253		/* Old CFA may refuse this command, which is just fine */
3254		if (ata_id_is_cfa(dev->id))
3255			ign_dev_err = 1;
		/* Catch several broken garbage emulations plus some
		   pre-ATA devices */
3258		if (ata_id_major_version(dev->id) == 0 &&
3259					dev->pio_mode <= XFER_PIO_2)
3260			ign_dev_err = 1;
3261		/* Some very old devices and some bad newer ones fail
3262		   any kind of SET_XFERMODE request but support PIO0-2
3263		   timings and no IORDY */
3264		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3265			ign_dev_err = 1;
3266	}
3267	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3268	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3269	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3270	    dev->dma_mode == XFER_MW_DMA_0 &&
3271	    (dev->id[63] >> 8) & 1)
3272		ign_dev_err = 1;
3273
3274	/* if the device is actually configured correctly, ignore dev err */
3275	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3276		ign_dev_err = 1;
3277
3278	if (err_mask & AC_ERR_DEV) {
3279		if (!ign_dev_err)
3280			goto fail;
3281		else
3282			dev_err_whine = " (device error ignored)";
3283	}
3284
3285	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3286		dev->xfer_shift, (int)dev->xfer_mode);
3287
3288	ata_dev_info(dev, "configured for %s%s\n",
3289		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3290		     dev_err_whine);
3291
3292	return 0;
3293
3294 fail:
3295	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3296	return -EIO;
3297}
3298
3299/**
3300 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3301 *	@link: link on which timings will be programmed
3302 *	@r_failed_dev: out parameter for failed device
3303 *
3304 *	Standard implementation of the function used to tune and set
3305 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3306 *	ata_dev_set_mode() fails, pointer to the failing device is
3307 *	returned in @r_failed_dev.
3308 *
3309 *	LOCKING:
3310 *	PCI/etc. bus probe sem.
3311 *
3312 *	RETURNS:
3313 *	0 on success, negative errno otherwise
3314 */
3315
3316int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3317{
3318	struct ata_port *ap = link->ap;
3319	struct ata_device *dev;
3320	int rc = 0, used_dma = 0, found = 0;
3321
3322	/* step 1: calculate xfer_mask */
3323	ata_for_each_dev(dev, link, ENABLED) {
3324		unsigned long pio_mask, dma_mask;
3325		unsigned int mode_mask;
3326
3327		mode_mask = ATA_DMA_MASK_ATA;
3328		if (dev->class == ATA_DEV_ATAPI)
3329			mode_mask = ATA_DMA_MASK_ATAPI;
3330		else if (ata_id_is_cfa(dev->id))
3331			mode_mask = ATA_DMA_MASK_CFA;
3332
3333		ata_dev_xfermask(dev);
3334		ata_force_xfermask(dev);
3335
3336		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3337
3338		if (libata_dma_mask & mode_mask)
3339			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3340						     dev->udma_mask);
3341		else
3342			dma_mask = 0;
3343
3344		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3345		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3346
3347		found = 1;
3348		if (ata_dma_enabled(dev))
3349			used_dma = 1;
3350	}
3351	if (!found)
3352		goto out;
3353
3354	/* step 2: always set host PIO timings */
3355	ata_for_each_dev(dev, link, ENABLED) {
3356		if (dev->pio_mode == 0xff) {
3357			ata_dev_warn(dev, "no PIO support\n");
3358			rc = -EINVAL;
3359			goto out;
3360		}
3361
3362		dev->xfer_mode = dev->pio_mode;
3363		dev->xfer_shift = ATA_SHIFT_PIO;
3364		if (ap->ops->set_piomode)
3365			ap->ops->set_piomode(ap, dev);
3366	}
3367
3368	/* step 3: set host DMA timings */
3369	ata_for_each_dev(dev, link, ENABLED) {
3370		if (!ata_dma_enabled(dev))
3371			continue;
3372
3373		dev->xfer_mode = dev->dma_mode;
3374		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3375		if (ap->ops->set_dmamode)
3376			ap->ops->set_dmamode(ap, dev);
3377	}
3378
3379	/* step 4: update devices' xfer mode */
3380	ata_for_each_dev(dev, link, ENABLED) {
3381		rc = ata_dev_set_mode(dev);
3382		if (rc)
3383			goto out;
3384	}
3385
3386	/* Record simplex status. If we selected DMA then the other
3387	 * host channels are not permitted to do so.
3388	 */
3389	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3390		ap->host->simplex_claimed = ap;
3391
3392 out:
3393	if (rc)
3394		*r_failed_dev = dev;
3395	return rc;
3396}
3397
3398/**
3399 *	ata_wait_ready - wait for link to become ready
3400 *	@link: link to be waited on
3401 *	@deadline: deadline jiffies for the operation
3402 *	@check_ready: callback to check link readiness
3403 *
3404 *	Wait for @link to become ready.  @check_ready should return
3405 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3406 *	link doesn't seem to be occupied, other errno for other error
3407 *	conditions.
3408 *
3409 *	Transient -ENODEV conditions are allowed for
3410 *	ATA_TMOUT_FF_WAIT.
3411 *
3412 *	LOCKING:
3413 *	EH context.
3414 *
3415 *	RETURNS:
3416 *	0 if @linke is ready before @deadline; otherwise, -errno.
3417 */
3418int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3419		   int (*check_ready)(struct ata_link *link))
3420{
3421	unsigned long start = jiffies;
3422	unsigned long nodev_deadline;
3423	int warned = 0;
3424
3425	/* choose which 0xff timeout to use, read comment in libata.h */
3426	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3427		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3428	else
3429		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3430
3431	/* Slave readiness can't be tested separately from master.  On
3432	 * M/S emulation configuration, this function should be called
3433	 * only on the master and it will handle both master and slave.
3434	 */
3435	WARN_ON(link == link->ap->slave_link);
3436
3437	if (time_after(nodev_deadline, deadline))
3438		nodev_deadline = deadline;
3439
3440	while (1) {
3441		unsigned long now = jiffies;
3442		int ready, tmp;
3443
3444		ready = tmp = check_ready(link);
3445		if (ready > 0)
3446			return 0;
3447
3448		/*
3449		 * -ENODEV could be transient.  Ignore -ENODEV if link
3450		 * is online.  Also, some SATA devices take a long
3451		 * time to clear 0xff after reset.  Wait for
3452		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3453		 * offline.
3454		 *
3455		 * Note that some PATA controllers (pata_ali) explode
3456		 * if status register is read more than once when
3457		 * there's no device attached.
3458		 */
3459		if (ready == -ENODEV) {
3460			if (ata_link_online(link))
3461				ready = 0;
3462			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3463				 !ata_link_offline(link) &&
3464				 time_before(now, nodev_deadline))
3465				ready = 0;
3466		}
3467
3468		if (ready)
3469			return ready;
3470		if (time_after(now, deadline))
3471			return -EBUSY;
3472
3473		if (!warned && time_after(now, start + 5 * HZ) &&
3474		    (deadline - now > 3 * HZ)) {
3475			ata_link_warn(link,
3476				"link is slow to respond, please be patient "
3477				"(ready=%d)\n", tmp);
3478			warned = 1;
3479		}
3480
3481		ata_msleep(link->ap, 50);
3482	}
3483}
3484
3485/**
3486 *	ata_wait_after_reset - wait for link to become ready after reset
3487 *	@link: link to be waited on
3488 *	@deadline: deadline jiffies for the operation
3489 *	@check_ready: callback to check link readiness
3490 *
3491 *	Wait for @link to become ready after reset.
3492 *
3493 *	LOCKING:
3494 *	EH context.
3495 *
3496 *	RETURNS:
3497 *	0 if @linke is ready before @deadline; otherwise, -errno.
3498 */
3499int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3500				int (*check_ready)(struct ata_link *link))
3501{
3502	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3503
3504	return ata_wait_ready(link, deadline, check_ready);
3505}
3506
3507/**
3508 *	sata_link_debounce - debounce SATA phy status
3509 *	@link: ATA link to debounce SATA phy status for
3510 *	@params: timing parameters { interval, duratinon, timeout } in msec
3511 *	@deadline: deadline jiffies for the operation
3512 *
3513 *	Make sure SStatus of @link reaches stable state, determined by
3514 *	holding the same value where DET is not 1 for @duration polled
3515 *	every @interval, before @timeout.  Timeout constraints the
3516 *	beginning of the stable state.  Because DET gets stuck at 1 on
3517 *	some controllers after hot unplugging, this functions waits
3518 *	until timeout then returns 0 if DET is stable at 1.
3519 *
3520 *	@timeout is further limited by @deadline.  The sooner of the
3521 *	two is used.
3522 *
3523 *	LOCKING:
3524 *	Kernel thread context (may sleep)
3525 *
3526 *	RETURNS:
3527 *	0 on success, -errno on failure.
3528 */
3529int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3530		       unsigned long deadline)
3531{
3532	unsigned long interval = params[0];
3533	unsigned long duration = params[1];
3534	unsigned long last_jiffies, t;
3535	u32 last, cur;
3536	int rc;
3537
3538	t = ata_deadline(jiffies, params[2]);
3539	if (time_before(t, deadline))
3540		deadline = t;
3541
3542	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3543		return rc;
3544	cur &= 0xf;
3545
3546	last = cur;
3547	last_jiffies = jiffies;
3548
3549	while (1) {
3550		ata_msleep(link->ap, interval);
3551		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3552			return rc;
3553		cur &= 0xf;
3554
3555		/* DET stable? */
3556		if (cur == last) {
3557			if (cur == 1 && time_before(jiffies, deadline))
3558				continue;
3559			if (time_after(jiffies,
3560				       ata_deadline(last_jiffies, duration)))
3561				return 0;
3562			continue;
3563		}
3564
3565		/* unstable, start over */
3566		last = cur;
3567		last_jiffies = jiffies;
3568
3569		/* Check deadline.  If debouncing failed, return
3570		 * -EPIPE to tell upper layer to lower link speed.
3571		 */
3572		if (time_after(jiffies, deadline))
3573			return -EPIPE;
3574	}
3575}
3576
3577/**
3578 *	sata_link_resume - resume SATA link
3579 *	@link: ATA link to resume SATA
3580 *	@params: timing parameters { interval, duratinon, timeout } in msec
3581 *	@deadline: deadline jiffies for the operation
3582 *
3583 *	Resume SATA phy @link and debounce it.
3584 *
3585 *	LOCKING:
3586 *	Kernel thread context (may sleep)
3587 *
3588 *	RETURNS:
3589 *	0 on success, -errno on failure.
3590 */
3591int sata_link_resume(struct ata_link *link, const unsigned long *params,
3592		     unsigned long deadline)
3593{
3594	int tries = ATA_LINK_RESUME_TRIES;
3595	u32 scontrol, serror;
3596	int rc;
3597
3598	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3599		return rc;
3600
3601	/*
3602	 * Writes to SControl sometimes get ignored under certain
3603	 * controllers (ata_piix SIDPR).  Make sure DET actually is
3604	 * cleared.
3605	 */
3606	do {
3607		scontrol = (scontrol & 0x0f0) | 0x300;
3608		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3609			return rc;
3610		/*
3611		 * Some PHYs react badly if SStatus is pounded
3612		 * immediately after resuming.  Delay 200ms before
3613		 * debouncing.
3614		 */
3615		ata_msleep(link->ap, 200);
3616
3617		/* is SControl restored correctly? */
3618		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3619			return rc;
3620	} while ((scontrol & 0xf0f) != 0x300 && --tries);
3621
3622	if ((scontrol & 0xf0f) != 0x300) {
3623		ata_link_warn(link, "failed to resume link (SControl %X)\n",
3624			     scontrol);
3625		return 0;
3626	}
3627
3628	if (tries < ATA_LINK_RESUME_TRIES)
3629		ata_link_warn(link, "link resume succeeded after %d retries\n",
3630			      ATA_LINK_RESUME_TRIES - tries);
3631
3632	if ((rc = sata_link_debounce(link, params, deadline)))
3633		return rc;
3634
3635	/* clear SError, some PHYs require this even for SRST to work */
3636	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3637		rc = sata_scr_write(link, SCR_ERROR, serror);
3638
3639	return rc != -EINVAL ? rc : 0;
3640}
3641
3642/**
3643 *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3644 *	@link: ATA link to manipulate SControl for
3645 *	@policy: LPM policy to configure
3646 *	@spm_wakeup: initiate LPM transition to active state
3647 *
3648 *	Manipulate the IPM field of the SControl register of @link
3649 *	according to @policy.  If @policy is ATA_LPM_MAX_POWER and
3650 *	@spm_wakeup is %true, the SPM field is manipulated to wake up
3651 *	the link.  This function also clears PHYRDY_CHG before
3652 *	returning.
3653 *
3654 *	LOCKING:
3655 *	EH context.
3656 *
3657 *	RETURNS:
3658 *	0 on succes, -errno otherwise.
3659 */
3660int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3661		      bool spm_wakeup)
3662{
3663	struct ata_eh_context *ehc = &link->eh_context;
3664	bool woken_up = false;
3665	u32 scontrol;
3666	int rc;
3667
3668	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3669	if (rc)
3670		return rc;
3671
3672	switch (policy) {
3673	case ATA_LPM_MAX_POWER:
3674		/* disable all LPM transitions */
3675		scontrol |= (0x7 << 8);
3676		/* initiate transition to active state */
3677		if (spm_wakeup) {
3678			scontrol |= (0x4 << 12);
3679			woken_up = true;
3680		}
3681		break;
3682	case ATA_LPM_MED_POWER:
3683		/* allow LPM to PARTIAL */
3684		scontrol &= ~(0x1 << 8);
3685		scontrol |= (0x6 << 8);
3686		break;
3687	case ATA_LPM_MIN_POWER:
3688		if (ata_link_nr_enabled(link) > 0)
3689			/* no restrictions on LPM transitions */
3690			scontrol &= ~(0x7 << 8);
3691		else {
3692			/* empty port, power off */
3693			scontrol &= ~0xf;
3694			scontrol |= (0x1 << 2);
3695		}
3696		break;
3697	default:
3698		WARN_ON(1);
3699	}
3700
3701	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3702	if (rc)
3703		return rc;
3704
3705	/* give the link time to transit out of LPM state */
3706	if (woken_up)
3707		msleep(10);
3708
3709	/* clear PHYRDY_CHG from SError */
3710	ehc->i.serror &= ~SERR_PHYRDY_CHG;
3711	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3712}
3713
3714/**
3715 *	ata_std_prereset - prepare for reset
3716 *	@link: ATA link to be reset
3717 *	@deadline: deadline jiffies for the operation
3718 *
3719 *	@link is about to be reset.  Initialize it.  Failure from
3720 *	prereset makes libata abort whole reset sequence and give up
3721 *	that port, so prereset should be best-effort.  It does its
3722 *	best to prepare for reset sequence but if things go wrong, it
3723 *	should just whine, not fail.
3724 *
3725 *	LOCKING:
3726 *	Kernel thread context (may sleep)
3727 *
3728 *	RETURNS:
3729 *	0 on success, -errno otherwise.
3730 */
3731int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3732{
3733	struct ata_port *ap = link->ap;
3734	struct ata_eh_context *ehc = &link->eh_context;
3735	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3736	int rc;
3737
3738	/* if we're about to do hardreset, nothing more to do */
3739	if (ehc->i.action & ATA_EH_HARDRESET)
3740		return 0;
3741
3742	/* if SATA, resume link */
3743	if (ap->flags & ATA_FLAG_SATA) {
3744		rc = sata_link_resume(link, timing, deadline);
3745		/* whine about phy resume failure but proceed */
3746		if (rc && rc != -EOPNOTSUPP)
3747			ata_link_warn(link,
3748				      "failed to resume link for reset (errno=%d)\n",
3749				      rc);
3750	}
3751
3752	/* no point in trying softreset on offline link */
3753	if (ata_phys_link_offline(link))
3754		ehc->i.action &= ~ATA_EH_SOFTRESET;
3755
3756	return 0;
3757}
3758
3759/**
3760 *	sata_link_hardreset - reset link via SATA phy reset
3761 *	@link: link to reset
3762 *	@timing: timing parameters { interval, duratinon, timeout } in msec
3763 *	@deadline: deadline jiffies for the operation
3764 *	@online: optional out parameter indicating link onlineness
3765 *	@check_ready: optional callback to check link readiness
3766 *
3767 *	SATA phy-reset @link using DET bits of SControl register.
3768 *	After hardreset, link readiness is waited upon using
3769 *	ata_wait_ready() if @check_ready is specified.  LLDs are
3770 *	allowed to not specify @check_ready and wait itself after this
3771 *	function returns.  Device classification is LLD's
3772 *	responsibility.
3773 *
3774 *	*@online is set to one iff reset succeeded and @link is online
3775 *	after reset.
3776 *
3777 *	LOCKING:
3778 *	Kernel thread context (may sleep)
3779 *
3780 *	RETURNS:
3781 *	0 on success, -errno otherwise.
3782 */
3783int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3784			unsigned long deadline,
3785			bool *online, int (*check_ready)(struct ata_link *))
3786{
3787	u32 scontrol;
3788	int rc;
3789
3790	DPRINTK("ENTER\n");
3791
3792	if (online)
3793		*online = false;
3794
3795	if (sata_set_spd_needed(link)) {
3796		/* SATA spec says nothing about how to reconfigure
3797		 * spd.  To be on the safe side, turn off phy during
3798		 * reconfiguration.  This works for at least ICH7 AHCI
3799		 * and Sil3124.
3800		 */
3801		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3802			goto out;
3803
3804		scontrol = (scontrol & 0x0f0) | 0x304;
3805
3806		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3807			goto out;
3808
3809		sata_set_spd(link);
3810	}
3811
3812	/* issue phy wake/reset */
3813	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3814		goto out;
3815
3816	scontrol = (scontrol & 0x0f0) | 0x301;
3817
3818	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3819		goto out;
3820
3821	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3822	 * 10.4.2 says at least 1 ms.
3823	 */
3824	ata_msleep(link->ap, 1);
3825
3826	/* bring link back */
3827	rc = sata_link_resume(link, timing, deadline);
3828	if (rc)
3829		goto out;
3830	/* if link is offline nothing more to do */
3831	if (ata_phys_link_offline(link))
3832		goto out;
3833
3834	/* Link is online.  From this point, -ENODEV too is an error. */
3835	if (online)
3836		*online = true;
3837
3838	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3839		/* If PMP is supported, we have to do follow-up SRST.
3840		 * Some PMPs don't send D2H Reg FIS after hardreset if
3841		 * the first port is empty.  Wait only for
3842		 * ATA_TMOUT_PMP_SRST_WAIT.
3843		 */
3844		if (check_ready) {
3845			unsigned long pmp_deadline;
3846
3847			pmp_deadline = ata_deadline(jiffies,
3848						    ATA_TMOUT_PMP_SRST_WAIT);
3849			if (time_after(pmp_deadline, deadline))
3850				pmp_deadline = deadline;
3851			ata_wait_ready(link, pmp_deadline, check_ready);
3852		}
3853		rc = -EAGAIN;
3854		goto out;
3855	}
3856
3857	rc = 0;
3858	if (check_ready)
3859		rc = ata_wait_ready(link, deadline, check_ready);
3860 out:
3861	if (rc && rc != -EAGAIN) {
3862		/* online is set iff link is online && reset succeeded */
3863		if (online)
3864			*online = false;
3865		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
3866	}
3867	DPRINTK("EXIT, rc=%d\n", rc);
3868	return rc;
3869}
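
/*
 * Usage sketch (illustrative only): a low-level driver can build its
 * ->hardreset method on top of sata_link_hardreset() by passing its own
 * readiness test as @check_ready.  "my_check_ready" below is a placeholder
 * for such a driver-specific callback, not an existing libata helper.
 *
 *	static int my_hardreset(struct ata_link *link, unsigned int *class,
 *				unsigned long deadline)
 *	{
 *		const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *
 *		return sata_link_hardreset(link, timing, deadline, &online,
 *					   my_check_ready);
 *	}
 */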
3870
3871/**
3872 *	sata_std_hardreset - COMRESET w/o waiting or classification
3873 *	@link: link to reset
3874 *	@class: resulting class of attached device
3875 *	@deadline: deadline jiffies for the operation
3876 *
3877 *	Standard SATA COMRESET w/o waiting or classification.
3878 *
3879 *	LOCKING:
3880 *	Kernel thread context (may sleep)
3881 *
3882 *	RETURNS:
3883 *	0 if link offline, -EAGAIN if link online, -errno on errors.
3884 */
3885int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3886		       unsigned long deadline)
3887{
3888	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3889	bool online;
3890	int rc;
3891
3892	/* do hardreset */
3893	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3894	return online ? -EAGAIN : rc;
3895}
3896
3897/**
3898 *	ata_std_postreset - standard postreset callback
3899 *	@link: the target ata_link
3900 *	@classes: classes of attached devices
3901 *
3902 *	This function is invoked after a successful reset.  Note that
3903 *	the device might have been reset more than once using
3904 *	different reset methods before postreset is invoked.
3905 *
3906 *	LOCKING:
3907 *	Kernel thread context (may sleep)
3908 */
3909void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3910{
3911	u32 serror;
3912
3913	DPRINTK("ENTER\n");
3914
3915	/* reset complete, clear SError */
3916	if (!sata_scr_read(link, SCR_ERROR, &serror))
3917		sata_scr_write(link, SCR_ERROR, serror);
3918
3919	/* print link status */
3920	sata_print_link_status(link);
3921
3922	DPRINTK("EXIT\n");
3923}
3924
3925/**
3926 *	ata_dev_same_device - Determine whether new ID matches configured device
3927 *	@dev: device to compare against
3928 *	@new_class: class of the new device
3929 *	@new_id: IDENTIFY page of the new device
3930 *
3931 *	Compare @new_class and @new_id against @dev and determine
3932 *	whether @dev is the device indicated by @new_class and
3933 *	@new_id.
3934 *
3935 *	LOCKING:
3936 *	None.
3937 *
3938 *	RETURNS:
3939 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3940 */
3941static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3942			       const u16 *new_id)
3943{
3944	const u16 *old_id = dev->id;
3945	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3946	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3947
3948	if (dev->class != new_class) {
3949		ata_dev_info(dev, "class mismatch %d != %d\n",
3950			     dev->class, new_class);
3951		return 0;
3952	}
3953
3954	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3955	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3956	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3957	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3958
3959	if (strcmp(model[0], model[1])) {
3960		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3961			     model[0], model[1]);
3962		return 0;
3963	}
3964
3965	if (strcmp(serial[0], serial[1])) {
3966		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3967			     serial[0], serial[1]);
3968		return 0;
3969	}
3970
3971	return 1;
3972}
3973
3974/**
3975 *	ata_dev_reread_id - Re-read IDENTIFY data
3976 *	@dev: target ATA device
3977 *	@readid_flags: read ID flags
3978 *
3979 *	Re-read IDENTIFY page and make sure @dev is still attached to
3980 *	the port.
3981 *
3982 *	LOCKING:
3983 *	Kernel thread context (may sleep)
3984 *
3985 *	RETURNS:
3986 *	0 on success, negative errno otherwise
3987 */
3988int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3989{
3990	unsigned int class = dev->class;
3991	u16 *id = (void *)dev->link->ap->sector_buf;
3992	int rc;
3993
3994	/* read ID data */
3995	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3996	if (rc)
3997		return rc;
3998
3999	/* is the device still there? */
4000	if (!ata_dev_same_device(dev, class, id))
4001		return -ENODEV;
4002
4003	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4004	return 0;
4005}
4006
4007/**
4008 *	ata_dev_revalidate - Revalidate ATA device
4009 *	@dev: device to revalidate
4010 *	@new_class: new class code
4011 *	@readid_flags: read ID flags
4012 *
4013 *	Re-read IDENTIFY page, make sure @dev is still attached to the
4014 *	port and reconfigure it according to the new IDENTIFY page.
4015 *
4016 *	LOCKING:
4017 *	Kernel thread context (may sleep)
4018 *
4019 *	RETURNS:
4020 *	0 on success, negative errno otherwise
4021 */
4022int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4023		       unsigned int readid_flags)
4024{
4025	u64 n_sectors = dev->n_sectors;
4026	u64 n_native_sectors = dev->n_native_sectors;
4027	int rc;
4028
4029	if (!ata_dev_enabled(dev))
4030		return -ENODEV;
4031
4032	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4033	if (ata_class_enabled(new_class) &&
4034	    new_class != ATA_DEV_ATA &&
4035	    new_class != ATA_DEV_ATAPI &&
4036	    new_class != ATA_DEV_SEMB) {
4037		ata_dev_info(dev, "class mismatch %u != %u\n",
4038			     dev->class, new_class);
4039		rc = -ENODEV;
4040		goto fail;
4041	}
4042
4043	/* re-read ID */
4044	rc = ata_dev_reread_id(dev, readid_flags);
4045	if (rc)
4046		goto fail;
4047
4048	/* configure device according to the new ID */
4049	rc = ata_dev_configure(dev);
4050	if (rc)
4051		goto fail;
4052
4053	/* verify n_sectors hasn't changed */
4054	if (dev->class != ATA_DEV_ATA || !n_sectors ||
4055	    dev->n_sectors == n_sectors)
4056		return 0;
4057
4058	/* n_sectors has changed */
4059	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4060		     (unsigned long long)n_sectors,
4061		     (unsigned long long)dev->n_sectors);
4062
4063	/*
4064	 * Something could have caused HPA to be unlocked
4065	 * involuntarily.  If n_native_sectors hasn't changed and the
4066	 * new size matches it, keep the device.
4067	 */
4068	if (dev->n_native_sectors == n_native_sectors &&
4069	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4070		ata_dev_warn(dev,
4071			     "new n_sectors matches native, probably "
4072			     "late HPA unlock, n_sectors updated\n");
4073		/* use the larger n_sectors */
4074		return 0;
4075	}
4076
4077	/*
4078	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
4079	 * unlocking HPA in those cases.
4080	 *
4081	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4082	 */
4083	if (dev->n_native_sectors == n_native_sectors &&
4084	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4085	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4086		ata_dev_warn(dev,
4087			     "old n_sectors matches native, probably "
4088			     "late HPA lock, will try to unlock HPA\n");
4089		/* try unlocking HPA */
4090		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4091		rc = -EIO;
4092	} else
4093		rc = -ENODEV;
4094
4095	/* restore original n_[native_]sectors and fail */
4096	dev->n_native_sectors = n_native_sectors;
4097	dev->n_sectors = n_sectors;
4098 fail:
4099	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4100	return rc;
4101}
4102
4103struct ata_blacklist_entry {
4104	const char *model_num;
4105	const char *model_rev;
4106	unsigned long horkage;
4107};
4108
4109static const struct ata_blacklist_entry ata_device_blacklist [] = {
4110	/* Devices with DMA related problems under Linux */
4111	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4112	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4113	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4114	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4115	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4116	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4117	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4118	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4119	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4120	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
4121	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4122	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4123	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4124	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4125	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4126	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
4127	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4128	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4129	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4130	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4131	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4132	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4133	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4134	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4135	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4136	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4137	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4138	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4139	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
4140	/* Odd clown on sil3726/4726 PMPs */
4141	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4142
4143	/* Weird ATAPI devices */
4144	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4145	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4146	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4147	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4148
4149	/* Devices we expect to fail diagnostics */
4150
4151	/* Devices where NCQ should be avoided */
4152	/* NCQ is slow */
4153	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4154	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
4155	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4156	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4157	/* NCQ is broken */
4158	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4159	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4160	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4161	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4162	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4163
4164	/* Seagate NCQ + FLUSH CACHE firmware bug */
4165	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4166						ATA_HORKAGE_FIRMWARE_WARN },
4167
4168	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4169						ATA_HORKAGE_FIRMWARE_WARN },
4170
4171	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4172						ATA_HORKAGE_FIRMWARE_WARN },
4173
4174	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4175						ATA_HORKAGE_FIRMWARE_WARN },
4176
	/* Seagate Momentus SpinPoint M8 drives seem to have FPDMA_AA issues */
4178	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
4179	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
4180
4181	/* Blacklist entries taken from Silicon Image 3124/3132
4182	   Windows driver .inf file - also several Linux problem reports */
4183	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
4184	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
4185	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4186
4187	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4188	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
4189
4190	/* devices which puke on READ_NATIVE_MAX */
4191	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
4192	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4193	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4194	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4195
4196	/* this one allows HPA unlocking but fails IOs on the area */
4197	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
4198
4199	/* Devices which report 1 sector over size HPA */
4200	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4201	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4202	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4203
4204	/* Devices which get the IVB wrong */
4205	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4206	/* Maybe we should just blacklist TSSTcorp... */
4207	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },
4208
4209	/* Devices that do not need bridging limits applied */
4210	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4211	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4212
4213	/* Devices which aren't very happy with higher link speeds */
4214	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
4215	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },
4216
4217	/*
4218	 * Devices which choke on SETXFER.  Applies only if both the
4219	 * device and controller are SATA.
4220	 */
4221	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
4222	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
4223	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
4224	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
4225	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
4226
4227	/* devices that don't properly handle queued TRIM commands */
4228	{ "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
4229	{ "Crucial_CT???M500SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
4230	{ "Micron_M550*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
4231	{ "Crucial_CT*M550SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
4232
4233	/*
4234	 * Some WD SATA-I drives spin up and down erratically when the link
	 * is put into the slumber mode.  We don't have a full list of the
4236	 * affected devices.  Disable LPM if the device matches one of the
4237	 * known prefixes and is SATA-1.  As a side effect LPM partial is
4238	 * lost too.
4239	 *
4240	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4241	 */
4242	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4243	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4244	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4245	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4246	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4247	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4248	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4249
4250	/* End Marker */
4251	{ }
4252};
4253
4254static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4255{
4256	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4257	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4258	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4259
4260	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4261	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4262
4263	while (ad->model_num) {
4264		if (glob_match(ad->model_num, model_num)) {
4265			if (ad->model_rev == NULL)
4266				return ad->horkage;
4267			if (glob_match(ad->model_rev, model_rev))
4268				return ad->horkage;
4269		}
4270		ad++;
4271	}
4272	return 0;
4273}
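
/*
 * Blacklist entries are shell-style globs matched against the full model
 * and firmware revision strings with glob_match(), so one entry can cover
 * a whole family of devices.  A few illustrative matches:
 *
 *	glob_match("ST3320[68]13AS", "ST3320613AS");		// true
 *	glob_match("Maxtor *", "Maxtor 7V300F0");		// true
 *	glob_match("WDC WD740ADFD-00", "WDC WD740ADFD-00NLR1");	// false, no '*'
 */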
4274
4275static int ata_dma_blacklisted(const struct ata_device *dev)
4276{
	/* We don't support polling DMA.
	 * Blacklist DMA (and fall back to PIO) for ATAPI devices with
	 * CDB-intr if the LLDD handles interrupts only in the
	 * HSM_ST_LAST state.
	 */
4281	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4282	    (dev->flags & ATA_DFLAG_CDB_INTR))
4283		return 1;
4284	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4285}
4286
4287/**
4288 *	ata_is_40wire		-	check drive side detection
4289 *	@dev: device
4290 *
4291 *	Perform drive side detection decoding, allowing for device vendors
4292 *	who can't follow the documentation.
4293 */
4294
4295static int ata_is_40wire(struct ata_device *dev)
4296{
4297	if (dev->horkage & ATA_HORKAGE_IVB)
4298		return ata_drive_40wire_relaxed(dev->id);
4299	return ata_drive_40wire(dev->id);
4300}
4301
4302/**
4303 *	cable_is_40wire		-	40/80/SATA decider
4304 *	@ap: port to consider
4305 *
4306 *	This function encapsulates the policy for speed management
4307 *	in one place. At the moment we don't cache the result but
4308 *	there is a good case for setting ap->cbl to the result when
4309 *	we are called with unknown cables (and figuring out if it
4310 *	impacts hotplug at all).
4311 *
4312 *	Return 1 if the cable appears to be 40 wire.
4313 */
4314
4315static int cable_is_40wire(struct ata_port *ap)
4316{
4317	struct ata_link *link;
4318	struct ata_device *dev;
4319
4320	/* If the controller thinks we are 40 wire, we are. */
4321	if (ap->cbl == ATA_CBL_PATA40)
4322		return 1;
4323
4324	/* If the controller thinks we are 80 wire, we are. */
4325	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4326		return 0;
4327
	/* If the system is known to use a 40 wire short cable (e.g.
	 * a laptop), then we allow 80 wire modes even if the drive
	 * isn't sure.
4331	 */
4332	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4333		return 0;
4334
4335	/* If the controller doesn't know, we scan.
4336	 *
	 * Note: We look for all 40 wire detects at this point.  Any
	 *       80 wire detect is taken to mean an 80 wire cable because
	 * - in many setups only the one drive (slave if present) will
	 *   give a valid detect
	 * - if you have a drive that isn't detect capable you don't
	 *   want it to colour the choice
4343	 */
4344	ata_for_each_link(link, ap, EDGE) {
4345		ata_for_each_dev(dev, link, ENABLED) {
4346			if (!ata_is_40wire(dev))
4347				return 0;
4348		}
4349	}
4350	return 1;
4351}
4352
4353/**
4354 *	ata_dev_xfermask - Compute supported xfermask of the given device
4355 *	@dev: Device to compute xfermask for
4356 *
4357 *	Compute supported xfermask of @dev and store it in
4358 *	dev->*_mask.  This function is responsible for applying all
4359 *	known limits including host controller limits, device
4360 *	blacklist, etc...
4361 *
4362 *	LOCKING:
4363 *	None.
4364 */
4365static void ata_dev_xfermask(struct ata_device *dev)
4366{
4367	struct ata_link *link = dev->link;
4368	struct ata_port *ap = link->ap;
4369	struct ata_host *host = ap->host;
4370	unsigned long xfer_mask;
4371
4372	/* controller modes available */
4373	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4374				      ap->mwdma_mask, ap->udma_mask);
4375
4376	/* drive modes available */
4377	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4378				       dev->mwdma_mask, dev->udma_mask);
4379	xfer_mask &= ata_id_xfermask(dev->id);
4380
4381	/*
4382	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4383	 *	cable
4384	 */
4385	if (ata_dev_pair(dev)) {
4386		/* No PIO5 or PIO6 */
4387		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA4 */
4389		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4390	}
4391
4392	if (ata_dma_blacklisted(dev)) {
4393		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4394		ata_dev_warn(dev,
4395			     "device is on DMA blacklist, disabling DMA\n");
4396	}
4397
4398	if ((host->flags & ATA_HOST_SIMPLEX) &&
4399	    host->simplex_claimed && host->simplex_claimed != ap) {
4400		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4401		ata_dev_warn(dev,
4402			     "simplex DMA is claimed by other device, disabling DMA\n");
4403	}
4404
4405	if (ap->flags & ATA_FLAG_NO_IORDY)
4406		xfer_mask &= ata_pio_mask_no_iordy(dev);
4407
4408	if (ap->ops->mode_filter)
4409		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4410
4411	/* Apply cable rule here.  Don't apply it early because when
4412	 * we handle hot plug the cable type can itself change.
4413	 * Check this last so that we know if the transfer rate was
4414	 * solely limited by the cable.
4415	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well.  Cases where we know a 40 wire cable
4417	 * is used safely for 80 are not checked here.
4418	 */
4419	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4420		/* UDMA/44 or higher would be available */
4421		if (cable_is_40wire(ap)) {
4422			ata_dev_warn(dev,
4423				     "limited to UDMA/33 due to 40-wire cable\n");
4424			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4425		}
4426
4427	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4428			    &dev->mwdma_mask, &dev->udma_mask);
4429}
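
/*
 * Usage sketch (illustrative only): ata_pack_xfermask() folds the separate
 * PIO/MWDMA/UDMA masks into one xfer_mask so the limits above can be applied
 * with plain bit operations, and ata_unpack_xfermask() splits it back out.
 *
 *	unsigned long xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA6);
 *	xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);		// cap at UDMA/33
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 */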
4430
4431/**
4432 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4433 *	@dev: Device to which command will be sent
4434 *
4435 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on its port.
4437 *
4438 *	LOCKING:
4439 *	PCI/etc. bus probe sem.
4440 *
4441 *	RETURNS:
4442 *	0 on success, AC_ERR_* mask otherwise.
4443 */
4444
4445static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4446{
4447	struct ata_taskfile tf;
4448	unsigned int err_mask;
4449
4450	/* set up set-features taskfile */
4451	DPRINTK("set features - xfer mode\n");
4452
4453	/* Some controllers and ATAPI devices show flaky interrupt
4454	 * behavior after setting xfer mode.  Use polling instead.
4455	 */
4456	ata_tf_init(dev, &tf);
4457	tf.command = ATA_CMD_SET_FEATURES;
4458	tf.feature = SETFEATURES_XFER;
4459	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4460	tf.protocol = ATA_PROT_NODATA;
4461	/* If we are using IORDY we must send the mode setting command */
4462	if (ata_pio_need_iordy(dev))
4463		tf.nsect = dev->xfer_mode;
4464	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
4466		tf.nsect = 0x01;
4467	else /* In the ancient relic department - skip all of this */
4468		return 0;
4469
4470	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4471
4472	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4473	return err_mask;
4474}
4475
4476/**
4477 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4478 *	@dev: Device to which command will be sent
4479 *	@enable: Whether to enable or disable the feature
 *	@feature: The feature value, passed in the sector count field
 *
 *	Issue SET FEATURES - SATA FEATURES command to device @dev
 *	on its port, with the sector count set to @feature.
4484 *
4485 *	LOCKING:
4486 *	PCI/etc. bus probe sem.
4487 *
4488 *	RETURNS:
4489 *	0 on success, AC_ERR_* mask otherwise.
4490 */
4491unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4492{
4493	struct ata_taskfile tf;
4494	unsigned int err_mask;
4495
4496	/* set up set-features taskfile */
4497	DPRINTK("set features - SATA features\n");
4498
4499	ata_tf_init(dev, &tf);
4500	tf.command = ATA_CMD_SET_FEATURES;
4501	tf.feature = enable;
4502	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4503	tf.protocol = ATA_PROT_NODATA;
4504	tf.nsect = feature;
4505
4506	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4507
4508	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4509	return err_mask;
4510}
4511EXPORT_SYMBOL_GPL(ata_dev_set_feature);
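
/*
 * Usage sketch (illustrative only): enabling a SATA feature such as
 * asynchronous notification passes the subcommand in @enable and the
 * feature code, carried in the sector count field, in @feature.
 *
 *	unsigned int err_mask;
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_AN);
 *	if (err_mask)
 *		ata_dev_warn(dev, "failed to enable AN (err_mask=0x%x)\n",
 *			     err_mask);
 */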
4512
4513/**
4514 *	ata_dev_init_params - Issue INIT DEV PARAMS command
4515 *	@dev: Device to which command will be sent
4516 *	@heads: Number of heads (taskfile parameter)
4517 *	@sectors: Number of sectors (taskfile parameter)
4518 *
4519 *	LOCKING:
4520 *	Kernel thread context (may sleep)
4521 *
4522 *	RETURNS:
4523 *	0 on success, AC_ERR_* mask otherwise.
4524 */
4525static unsigned int ata_dev_init_params(struct ata_device *dev,
4526					u16 heads, u16 sectors)
4527{
4528	struct ata_taskfile tf;
4529	unsigned int err_mask;
4530
4531	/* Number of sectors per track 1-255. Number of heads 1-16 */
4532	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4533		return AC_ERR_INVALID;
4534
4535	/* set up init dev params taskfile */
	DPRINTK("init dev params\n");
4537
4538	ata_tf_init(dev, &tf);
4539	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4540	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4541	tf.protocol = ATA_PROT_NODATA;
4542	tf.nsect = sectors;
4543	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4544
4545	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out-of-spec drive
	   and we should continue as we issue the setup based on the
	   drive's reported working geometry */
4549	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4550		err_mask = 0;
4551
4552	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4553	return err_mask;
4554}
4555
4556/**
4557 *	ata_sg_clean - Unmap DMA memory associated with command
4558 *	@qc: Command containing DMA memory to be released
4559 *
4560 *	Unmap all mapped DMA memory associated with this command.
4561 *
4562 *	LOCKING:
4563 *	spin_lock_irqsave(host lock)
4564 */
4565void ata_sg_clean(struct ata_queued_cmd *qc)
4566{
4567	struct ata_port *ap = qc->ap;
4568	struct scatterlist *sg = qc->sg;
4569	int dir = qc->dma_dir;
4570
4571	WARN_ON_ONCE(sg == NULL);
4572
4573	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4574
4575	if (qc->n_elem)
4576		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4577
4578	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4579	qc->sg = NULL;
4580}
4581
4582/**
4583 *	atapi_check_dma - Check whether ATAPI DMA can be supported
4584 *	@qc: Metadata associated with taskfile to check
4585 *
4586 *	Allow low-level driver to filter ATA PACKET commands, returning
4587 *	a status indicating whether or not it is OK to use DMA for the
4588 *	supplied PACKET command.
4589 *
4590 *	LOCKING:
4591 *	spin_lock_irqsave(host lock)
4592 *
4593 *	RETURNS: 0 when ATAPI DMA can be used
4594 *               nonzero otherwise
4595 */
4596int atapi_check_dma(struct ata_queued_cmd *qc)
4597{
4598	struct ata_port *ap = qc->ap;
4599
	/* Don't allow DMA if it isn't a multiple of 16 bytes.  Quite a
4601	 * few ATAPI devices choke on such DMA requests.
4602	 */
4603	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4604	    unlikely(qc->nbytes & 15))
4605		return 1;
4606
4607	if (ap->ops->check_atapi_dma)
4608		return ap->ops->check_atapi_dma(qc);
4609
4610	return 0;
4611}
4612
4613/**
4614 *	ata_std_qc_defer - Check whether a qc needs to be deferred
4615 *	@qc: ATA command in question
4616 *
4617 *	Non-NCQ commands cannot run with any other command, NCQ or
 *	not.  As the upper layer only knows the queue depth, we are
4619 *	responsible for maintaining exclusion.  This function checks
4620 *	whether a new command @qc can be issued.
4621 *
4622 *	LOCKING:
4623 *	spin_lock_irqsave(host lock)
4624 *
4625 *	RETURNS:
4626 *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4627 */
4628int ata_std_qc_defer(struct ata_queued_cmd *qc)
4629{
4630	struct ata_link *link = qc->dev->link;
4631
4632	if (qc->tf.protocol == ATA_PROT_NCQ) {
4633		if (!ata_tag_valid(link->active_tag))
4634			return 0;
4635	} else {
4636		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4637			return 0;
4638	}
4639
4640	return ATA_DEFER_LINK;
4641}
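
/*
 * Usage sketch (illustrative only): a driver with extra controller-specific
 * restrictions typically layers its own checks on top of ata_std_qc_defer().
 * "my_cmd_needs_whole_port" is a placeholder for such a predicate.
 *
 *	static int my_qc_defer(struct ata_queued_cmd *qc)
 *	{
 *		if (my_cmd_needs_whole_port(qc) && qc->ap->nr_active_links)
 *			return ATA_DEFER_PORT;
 *
 *		return ata_std_qc_defer(qc);
 *	}
 */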
4642
4643void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4644
4645/**
4646 *	ata_sg_init - Associate command with scatter-gather table.
4647 *	@qc: Command to be associated
4648 *	@sg: Scatter-gather table.
4649 *	@n_elem: Number of elements in s/g table.
4650 *
4651 *	Initialize the data-related elements of queued_cmd @qc
4652 *	to point to a scatter-gather table @sg, containing @n_elem
4653 *	elements.
4654 *
4655 *	LOCKING:
4656 *	spin_lock_irqsave(host lock)
4657 */
4658void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4659		 unsigned int n_elem)
4660{
4661	qc->sg = sg;
4662	qc->n_elem = n_elem;
4663	qc->cursg = qc->sg;
4664}
4665
4666/**
4667 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4668 *	@qc: Command with scatter-gather table to be mapped.
4669 *
4670 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4671 *
4672 *	LOCKING:
4673 *	spin_lock_irqsave(host lock)
4674 *
4675 *	RETURNS:
4676 *	Zero on success, negative on error.
4677 *
4678 */
4679static int ata_sg_setup(struct ata_queued_cmd *qc)
4680{
4681	struct ata_port *ap = qc->ap;
4682	unsigned int n_elem;
4683
4684	VPRINTK("ENTER, ata%u\n", ap->print_id);
4685
4686	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4687	if (n_elem < 1)
4688		return -1;
4689
4690	DPRINTK("%d sg elements mapped\n", n_elem);
4691	qc->orig_n_elem = qc->n_elem;
4692	qc->n_elem = n_elem;
4693	qc->flags |= ATA_QCFLAG_DMAMAP;
4694
4695	return 0;
4696}
4697
4698/**
4699 *	swap_buf_le16 - swap halves of 16-bit words in place
4700 *	@buf:  Buffer to swap
4701 *	@buf_words:  Number of 16-bit words in buffer.
4702 *
4703 *	Swap halves of 16-bit words if needed to convert from
4704 *	little-endian byte order to native cpu byte order, or
4705 *	vice-versa.
4706 *
4707 *	LOCKING:
4708 *	Inherited from caller.
4709 */
4710void swap_buf_le16(u16 *buf, unsigned int buf_words)
4711{
4712#ifdef __BIG_ENDIAN
4713	unsigned int i;
4714
4715	for (i = 0; i < buf_words; i++)
4716		buf[i] = le16_to_cpu(buf[i]);
4717#endif /* __BIG_ENDIAN */
4718}
4719
4720/**
4721 *	ata_qc_new - Request an available ATA command, for queueing
4722 *	@ap: target port
4723 *
4724 *	Some ATA host controllers may implement a queue depth which is less
4725 *	than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
4726 *	the hardware limitation.
4727 *
4728 *	LOCKING:
4729 *	None.
4730 */
4731
4732static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4733{
4734	struct ata_queued_cmd *qc = NULL;
4735	unsigned int max_queue = ap->host->n_tags;
4736	unsigned int i, tag;
4737
4738	/* no command while frozen */
4739	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4740		return NULL;
4741
4742	for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
4743		tag = tag < max_queue ? tag : 0;
4744
		/* the last tag is reserved for the internal command. */
4746		if (tag == ATA_TAG_INTERNAL)
4747			continue;
4748
4749		if (!test_and_set_bit(tag, &ap->qc_allocated)) {
4750			qc = __ata_qc_from_tag(ap, tag);
4751			qc->tag = tag;
4752			ap->last_tag = tag;
4753			break;
4754		}
4755	}
4756
4757	return qc;
4758}
4759
4760/**
4761 *	ata_qc_new_init - Request an available ATA command, and initialize it
4762 *	@dev: Device from whom we request an available command structure
4763 *
4764 *	LOCKING:
4765 *	None.
4766 */
4767
4768struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4769{
4770	struct ata_port *ap = dev->link->ap;
4771	struct ata_queued_cmd *qc;
4772
4773	qc = ata_qc_new(ap);
4774	if (qc) {
4775		qc->scsicmd = NULL;
4776		qc->ap = ap;
4777		qc->dev = dev;
4778
4779		ata_qc_reinit(qc);
4780	}
4781
4782	return qc;
4783}
4784
4785/**
4786 *	ata_qc_free - free unused ata_queued_cmd
4787 *	@qc: Command to complete
4788 *
 *	Designed to free an unused ata_queued_cmd object
 *	in case something prevents using it.
4791 *
4792 *	LOCKING:
4793 *	spin_lock_irqsave(host lock)
4794 */
4795void ata_qc_free(struct ata_queued_cmd *qc)
4796{
4797	struct ata_port *ap;
4798	unsigned int tag;
4799
4800	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4801	ap = qc->ap;
4802
4803	qc->flags = 0;
4804	tag = qc->tag;
4805	if (likely(ata_tag_valid(tag))) {
4806		qc->tag = ATA_TAG_POISON;
4807		clear_bit(tag, &ap->qc_allocated);
4808	}
4809}
4810
4811void __ata_qc_complete(struct ata_queued_cmd *qc)
4812{
4813	struct ata_port *ap;
4814	struct ata_link *link;
4815
4816	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4817	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4818	ap = qc->ap;
4819	link = qc->dev->link;
4820
4821	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4822		ata_sg_clean(qc);
4823
4824	/* command should be marked inactive atomically with qc completion */
4825	if (qc->tf.protocol == ATA_PROT_NCQ) {
4826		link->sactive &= ~(1 << qc->tag);
4827		if (!link->sactive)
4828			ap->nr_active_links--;
4829	} else {
4830		link->active_tag = ATA_TAG_POISON;
4831		ap->nr_active_links--;
4832	}
4833
4834	/* clear exclusive status */
4835	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4836		     ap->excl_link == link))
4837		ap->excl_link = NULL;
4838
4839	/* atapi: mark qc as inactive to prevent the interrupt handler
4840	 * from completing the command twice later, before the error handler
4841	 * is called. (when rc != 0 and atapi request sense is needed)
4842	 */
4843	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4844	ap->qc_active &= ~(1 << qc->tag);
4845
4846	/* call completion callback */
4847	qc->complete_fn(qc);
4848}
4849
4850static void fill_result_tf(struct ata_queued_cmd *qc)
4851{
4852	struct ata_port *ap = qc->ap;
4853
4854	qc->result_tf.flags = qc->tf.flags;
4855	ap->ops->qc_fill_rtf(qc);
4856}
4857
4858static void ata_verify_xfer(struct ata_queued_cmd *qc)
4859{
4860	struct ata_device *dev = qc->dev;
4861
4862	if (ata_is_nodata(qc->tf.protocol))
4863		return;
4864
4865	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4866		return;
4867
4868	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4869}
4870
4871/**
4872 *	ata_qc_complete - Complete an active ATA command
4873 *	@qc: Command to complete
4874 *
4875 *	Indicate to the mid and upper layers that an ATA command has
4876 *	completed, with either an ok or not-ok status.
4877 *
4878 *	Refrain from calling this function multiple times when
4879 *	successfully completing multiple NCQ commands.
4880 *	ata_qc_complete_multiple() should be used instead, which will
4881 *	properly update IRQ expect state.
4882 *
4883 *	LOCKING:
4884 *	spin_lock_irqsave(host lock)
4885 */
4886void ata_qc_complete(struct ata_queued_cmd *qc)
4887{
4888	struct ata_port *ap = qc->ap;
4889
4890	/* XXX: New EH and old EH use different mechanisms to
4891	 * synchronize EH with regular execution path.
4892	 *
4893	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4894	 * Normal execution path is responsible for not accessing a
4895	 * failed qc.  libata core enforces the rule by returning NULL
4896	 * from ata_qc_from_tag() for failed qcs.
4897	 *
4898	 * Old EH depends on ata_qc_complete() nullifying completion
4899	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4900	 * not synchronize with interrupt handler.  Only PIO task is
4901	 * taken care of.
4902	 */
4903	if (ap->ops->error_handler) {
4904		struct ata_device *dev = qc->dev;
4905		struct ata_eh_info *ehi = &dev->link->eh_info;
4906
4907		if (unlikely(qc->err_mask))
4908			qc->flags |= ATA_QCFLAG_FAILED;
4909
4910		/*
4911		 * Finish internal commands without any further processing
4912		 * and always with the result TF filled.
4913		 */
4914		if (unlikely(ata_tag_internal(qc->tag))) {
4915			fill_result_tf(qc);
4916			__ata_qc_complete(qc);
4917			return;
4918		}
4919
4920		/*
4921		 * Non-internal qc has failed.  Fill the result TF and
4922		 * summon EH.
4923		 */
4924		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4925			fill_result_tf(qc);
4926			ata_qc_schedule_eh(qc);
4927			return;
4928		}
4929
4930		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4931
4932		/* read result TF if requested */
4933		if (qc->flags & ATA_QCFLAG_RESULT_TF)
4934			fill_result_tf(qc);
4935
4936		/* Some commands need post-processing after successful
4937		 * completion.
4938		 */
4939		switch (qc->tf.command) {
4940		case ATA_CMD_SET_FEATURES:
4941			if (qc->tf.feature != SETFEATURES_WC_ON &&
4942			    qc->tf.feature != SETFEATURES_WC_OFF)
4943				break;
4944			/* fall through */
4945		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4946		case ATA_CMD_SET_MULTI: /* multi_count changed */
4947			/* revalidate device */
4948			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4949			ata_port_schedule_eh(ap);
4950			break;
4951
4952		case ATA_CMD_SLEEP:
4953			dev->flags |= ATA_DFLAG_SLEEPING;
4954			break;
4955		}
4956
4957		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4958			ata_verify_xfer(qc);
4959
4960		__ata_qc_complete(qc);
4961	} else {
4962		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4963			return;
4964
4965		/* read result TF if failed or requested */
4966		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4967			fill_result_tf(qc);
4968
4969		__ata_qc_complete(qc);
4970	}
4971}
4972
4973/**
4974 *	ata_qc_complete_multiple - Complete multiple qcs successfully
4975 *	@ap: port in question
4976 *	@qc_active: new qc_active mask
4977 *
 *	Complete in-flight commands.  This function is meant to be
 *	called from a low-level driver's interrupt routine to complete
 *	requests normally.  ap->qc_active and @qc_active are compared
 *	and commands are completed accordingly.
4982 *
4983 *	Always use this function when completing multiple NCQ commands
4984 *	from IRQ handlers instead of calling ata_qc_complete()
4985 *	multiple times to keep IRQ expect status properly in sync.
4986 *
4987 *	LOCKING:
4988 *	spin_lock_irqsave(host lock)
4989 *
4990 *	RETURNS:
4991 *	Number of completed commands on success, -errno otherwise.
4992 */
4993int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4994{
4995	int nr_done = 0;
4996	u32 done_mask;
4997
4998	done_mask = ap->qc_active ^ qc_active;
4999
5000	if (unlikely(done_mask & qc_active)) {
5001		ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
5002			     ap->qc_active, qc_active);
5003		return -EINVAL;
5004	}
5005
5006	while (done_mask) {
5007		struct ata_queued_cmd *qc;
5008		unsigned int tag = __ffs(done_mask);
5009
5010		qc = ata_qc_from_tag(ap, tag);
5011		if (qc) {
5012			ata_qc_complete(qc);
5013			nr_done++;
5014		}
5015		done_mask &= ~(1 << tag);
5016	}
5017
5018	return nr_done;
5019}
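
/*
 * Usage sketch (illustrative only): an NCQ-capable driver's interrupt
 * handler reads the controller's "commands still in flight" register and
 * hands the mask to ata_qc_complete_multiple(); every command which has
 * dropped out of the mask is completed.  "my_read_active_tags" is a
 * placeholder for the hardware-specific register read.
 *
 *	static void my_host_intr(struct ata_port *ap)
 *	{
 *		u32 qc_active = my_read_active_tags(ap);
 *
 *		ata_qc_complete_multiple(ap, qc_active);
 *	}
 */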
5020
5021/**
5022 *	ata_qc_issue - issue taskfile to device
5023 *	@qc: command to issue to device
5024 *
 *	Prepare an ATA command for submission to the device.
5026 *	This includes mapping the data into a DMA-able
5027 *	area, filling in the S/G table, and finally
5028 *	writing the taskfile to hardware, starting the command.
5029 *
5030 *	LOCKING:
5031 *	spin_lock_irqsave(host lock)
5032 */
5033void ata_qc_issue(struct ata_queued_cmd *qc)
5034{
5035	struct ata_port *ap = qc->ap;
5036	struct ata_link *link = qc->dev->link;
5037	u8 prot = qc->tf.protocol;
5038
5039	/* Make sure only one non-NCQ command is outstanding.  The
5040	 * check is skipped for old EH because it reuses active qc to
5041	 * request ATAPI sense.
5042	 */
5043	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5044
5045	if (ata_is_ncq(prot)) {
5046		WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5047
5048		if (!link->sactive)
5049			ap->nr_active_links++;
5050		link->sactive |= 1 << qc->tag;
5051	} else {
5052		WARN_ON_ONCE(link->sactive);
5053
5054		ap->nr_active_links++;
5055		link->active_tag = qc->tag;
5056	}
5057
5058	qc->flags |= ATA_QCFLAG_ACTIVE;
5059	ap->qc_active |= 1 << qc->tag;
5060
5061	/*
5062	 * We guarantee to LLDs that they will have at least one
5063	 * non-zero sg if the command is a data command.
5064	 */
5065	if (WARN_ON_ONCE(ata_is_data(prot) &&
5066			 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5067		goto sys_err;
5068
5069	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5070				 (ap->flags & ATA_FLAG_PIO_DMA)))
5071		if (ata_sg_setup(qc))
5072			goto sys_err;
5073
5074	/* if device is sleeping, schedule reset and abort the link */
5075	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5076		link->eh_info.action |= ATA_EH_RESET;
5077		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5078		ata_link_abort(link);
5079		return;
5080	}
5081
5082	ap->ops->qc_prep(qc);
5083
5084	qc->err_mask |= ap->ops->qc_issue(qc);
5085	if (unlikely(qc->err_mask))
5086		goto err;
5087	return;
5088
5089sys_err:
5090	qc->err_mask |= AC_ERR_SYSTEM;
5091err:
5092	ata_qc_complete(qc);
5093}
5094
5095/**
5096 *	sata_scr_valid - test whether SCRs are accessible
5097 *	@link: ATA link to test SCR accessibility for
5098 *
5099 *	Test whether SCRs are accessible for @link.
5100 *
5101 *	LOCKING:
5102 *	None.
5103 *
5104 *	RETURNS:
5105 *	1 if SCRs are accessible, 0 otherwise.
5106 */
5107int sata_scr_valid(struct ata_link *link)
5108{
5109	struct ata_port *ap = link->ap;
5110
5111	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5112}
5113
5114/**
5115 *	sata_scr_read - read SCR register of the specified port
5116 *	@link: ATA link to read SCR for
5117 *	@reg: SCR to read
5118 *	@val: Place to store read value
5119 *
5120 *	Read SCR register @reg of @link into *@val.  This function is
5121 *	guaranteed to succeed if @link is ap->link, the cable type of
5122 *	the port is SATA and the port implements ->scr_read.
5123 *
5124 *	LOCKING:
5125 *	None if @link is ap->link.  Kernel thread context otherwise.
5126 *
5127 *	RETURNS:
5128 *	0 on success, negative errno on failure.
5129 */
5130int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5131{
5132	if (ata_is_host_link(link)) {
5133		if (sata_scr_valid(link))
5134			return link->ap->ops->scr_read(link, reg, val);
5135		return -EOPNOTSUPP;
5136	}
5137
5138	return sata_pmp_scr_read(link, reg, val);
5139}
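
/*
 * Usage sketch (illustrative only): reading the negotiated link speed out
 * of the SStatus register.
 *
 *	u32 sstatus;
 *
 *	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
 *		ata_link_info(link, "SPD field %u\n", (sstatus >> 4) & 0xf);
 */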
5140
5141/**
5142 *	sata_scr_write - write SCR register of the specified port
5143 *	@link: ATA link to write SCR for
5144 *	@reg: SCR to write
5145 *	@val: value to write
5146 *
5147 *	Write @val to SCR register @reg of @link.  This function is
5148 *	guaranteed to succeed if @link is ap->link, the cable type of
 *	the port is SATA and the port implements ->scr_write.
5150 *
5151 *	LOCKING:
5152 *	None if @link is ap->link.  Kernel thread context otherwise.
5153 *
5154 *	RETURNS:
5155 *	0 on success, negative errno on failure.
5156 */
5157int sata_scr_write(struct ata_link *link, int reg, u32 val)
5158{
5159	if (ata_is_host_link(link)) {
5160		if (sata_scr_valid(link))
5161			return link->ap->ops->scr_write(link, reg, val);
5162		return -EOPNOTSUPP;
5163	}
5164
5165	return sata_pmp_scr_write(link, reg, val);
5166}
5167
5168/**
5169 *	sata_scr_write_flush - write SCR register of the specified port and flush
5170 *	@link: ATA link to write SCR for
5171 *	@reg: SCR to write
5172 *	@val: value to write
5173 *
 *	This function is identical to sata_scr_write() except that it
 *	performs a flush (read back) after writing to the register.
5176 *
5177 *	LOCKING:
5178 *	None if @link is ap->link.  Kernel thread context otherwise.
5179 *
5180 *	RETURNS:
5181 *	0 on success, negative errno on failure.
5182 */
5183int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5184{
5185	if (ata_is_host_link(link)) {
5186		int rc;
5187
5188		if (sata_scr_valid(link)) {
5189			rc = link->ap->ops->scr_write(link, reg, val);
5190			if (rc == 0)
5191				rc = link->ap->ops->scr_read(link, reg, &val);
5192			return rc;
5193		}
5194		return -EOPNOTSUPP;
5195	}
5196
5197	return sata_pmp_scr_write(link, reg, val);
5198}
5199
5200/**
5201 *	ata_phys_link_online - test whether the given link is online
5202 *	@link: ATA link to test
5203 *
5204 *	Test whether @link is online.  Note that this function returns
5205 *	0 if online status of @link cannot be obtained, so
5206 *	ata_link_online(link) != !ata_link_offline(link).
5207 *
5208 *	LOCKING:
5209 *	None.
5210 *
5211 *	RETURNS:
5212 *	True if the port online status is available and online.
5213 */
5214bool ata_phys_link_online(struct ata_link *link)
5215{
5216	u32 sstatus;
5217
5218	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5219	    ata_sstatus_online(sstatus))
5220		return true;
5221	return false;
5222}
5223
5224/**
5225 *	ata_phys_link_offline - test whether the given link is offline
5226 *	@link: ATA link to test
5227 *
5228 *	Test whether @link is offline.  Note that this function
5229 *	returns 0 if offline status of @link cannot be obtained, so
5230 *	ata_link_online(link) != !ata_link_offline(link).
5231 *
5232 *	LOCKING:
5233 *	None.
5234 *
5235 *	RETURNS:
5236 *	True if the port offline status is available and offline.
5237 */
5238bool ata_phys_link_offline(struct ata_link *link)
5239{
5240	u32 sstatus;
5241
5242	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5243	    !ata_sstatus_online(sstatus))
5244		return true;
5245	return false;
5246}
5247
5248/**
5249 *	ata_link_online - test whether the given link is online
5250 *	@link: ATA link to test
5251 *
5252 *	Test whether @link is online.  This is identical to
5253 *	ata_phys_link_online() when there's no slave link.  When
5254 *	there's a slave link, this function should only be called on
5255 *	the master link and will return true if any of M/S links is
5256 *	online.
5257 *
5258 *	LOCKING:
5259 *	None.
5260 *
5261 *	RETURNS:
5262 *	True if the port online status is available and online.
5263 */
5264bool ata_link_online(struct ata_link *link)
5265{
5266	struct ata_link *slave = link->ap->slave_link;
5267
5268	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5269
5270	return ata_phys_link_online(link) ||
5271		(slave && ata_phys_link_online(slave));
5272}
5273
5274/**
5275 *	ata_link_offline - test whether the given link is offline
5276 *	@link: ATA link to test
5277 *
5278 *	Test whether @link is offline.  This is identical to
5279 *	ata_phys_link_offline() when there's no slave link.  When
5280 *	there's a slave link, this function should only be called on
5281 *	the master link and will return true if both M/S links are
5282 *	offline.
5283 *
5284 *	LOCKING:
5285 *	None.
5286 *
5287 *	RETURNS:
5288 *	True if the port offline status is available and offline.
5289 */
5290bool ata_link_offline(struct ata_link *link)
5291{
5292	struct ata_link *slave = link->ap->slave_link;
5293
5294	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5295
5296	return ata_phys_link_offline(link) &&
5297		(!slave || ata_phys_link_offline(slave));
5298}
5299
5300#ifdef CONFIG_PM
5301static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5302				unsigned int action, unsigned int ehi_flags,
5303				bool async)
5304{
5305	struct ata_link *link;
5306	unsigned long flags;
5307
	/* A previous resume operation might still be in
	 * progress.  Wait for PM_PENDING to clear.
5310	 */
5311	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5312		ata_port_wait_eh(ap);
5313		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5314	}
5315
5316	/* request PM ops to EH */
5317	spin_lock_irqsave(ap->lock, flags);
5318
5319	ap->pm_mesg = mesg;
5320	ap->pflags |= ATA_PFLAG_PM_PENDING;
5321	ata_for_each_link(link, ap, HOST_FIRST) {
5322		link->eh_info.action |= action;
5323		link->eh_info.flags |= ehi_flags;
5324	}
5325
5326	ata_port_schedule_eh(ap);
5327
5328	spin_unlock_irqrestore(ap->lock, flags);
5329
5330	if (!async) {
5331		ata_port_wait_eh(ap);
5332		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5333	}
5334}
5335
5336/*
 * On some hardware, the device fails to respond after being spun down for
 * suspend.  As the device won't be used before being resumed, we don't need
 * to touch it.  Ask EH to skip the usual stuff and proceed directly to
 * suspend.
5340 *
5341 * http://thread.gmane.org/gmane.linux.ide/46764
5342 */
5343static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5344						 | ATA_EHI_NO_AUTOPSY
5345						 | ATA_EHI_NO_RECOVERY;
5346
5347static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5348{
5349	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5350}
5351
5352static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
5353{
5354	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
5355}
5356
5357static int ata_port_pm_suspend(struct device *dev)
5358{
5359	struct ata_port *ap = to_ata_port(dev);
5360
5361	if (pm_runtime_suspended(dev))
5362		return 0;
5363
5364	ata_port_suspend(ap, PMSG_SUSPEND);
5365	return 0;
5366}
5367
5368static int ata_port_pm_freeze(struct device *dev)
5369{
5370	struct ata_port *ap = to_ata_port(dev);
5371
5372	if (pm_runtime_suspended(dev))
5373		return 0;
5374
5375	ata_port_suspend(ap, PMSG_FREEZE);
5376	return 0;
5377}
5378
5379static int ata_port_pm_poweroff(struct device *dev)
5380{
5381	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5382	return 0;
5383}
5384
5385static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5386						| ATA_EHI_QUIET;
5387
5388static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5389{
5390	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5391}
5392
5393static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5394{
5395	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5396}
5397
5398static int ata_port_pm_resume(struct device *dev)
5399{
5400	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5401	pm_runtime_disable(dev);
5402	pm_runtime_set_active(dev);
5403	pm_runtime_enable(dev);
5404	return 0;
5405}
5406
5407/*
 * For ODDs, the upper layer will poll for media change every few seconds,
 * which will make the device enter and leave suspend state every few
 * seconds.  As each suspend causes a hard/soft reset, the gain from runtime
 * suspend is very small and the ODD may malfunction after constantly being
 * reset.  So the idle callback here will not proceed to suspend if a
 * non-ZPODD capable ODD is attached to the port.
5414 */
5415static int ata_port_runtime_idle(struct device *dev)
5416{
5417	struct ata_port *ap = to_ata_port(dev);
5418	struct ata_link *link;
5419	struct ata_device *adev;
5420
5421	ata_for_each_link(link, ap, HOST_FIRST) {
5422		ata_for_each_dev(adev, link, ENABLED)
5423			if (adev->class == ATA_DEV_ATAPI &&
5424			    !zpodd_dev_enabled(adev))
5425				return -EBUSY;
5426	}
5427
5428	return 0;
5429}
5430
5431static int ata_port_runtime_suspend(struct device *dev)
5432{
5433	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5434	return 0;
5435}
5436
5437static int ata_port_runtime_resume(struct device *dev)
5438{
5439	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5440	return 0;
5441}
5442
5443static const struct dev_pm_ops ata_port_pm_ops = {
5444	.suspend = ata_port_pm_suspend,
5445	.resume = ata_port_pm_resume,
5446	.freeze = ata_port_pm_freeze,
5447	.thaw = ata_port_pm_resume,
5448	.poweroff = ata_port_pm_poweroff,
5449	.restore = ata_port_pm_resume,
5450
5451	.runtime_suspend = ata_port_runtime_suspend,
5452	.runtime_resume = ata_port_runtime_resume,
5453	.runtime_idle = ata_port_runtime_idle,
5454};
5455
5456/* sas ports don't participate in pm runtime management of ata_ports,
5457 * and need to resume ata devices at the domain level, not the per-port
5458 * level. sas suspend/resume is async to allow parallel port recovery
5459 * since sas has multiple ata_port instances per Scsi_Host.
5460 */
5461void ata_sas_port_suspend(struct ata_port *ap)
5462{
5463	ata_port_suspend_async(ap, PMSG_SUSPEND);
5464}
5465EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5466
5467void ata_sas_port_resume(struct ata_port *ap)
5468{
5469	ata_port_resume_async(ap, PMSG_RESUME);
5470}
5471EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5472
5473/**
5474 *	ata_host_suspend - suspend host
5475 *	@host: host to suspend
5476 *	@mesg: PM message
5477 *
5478 *	Suspend @host.  Actual operation is performed by port suspend.
5479 */
5480int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5481{
5482	host->dev->power.power_state = mesg;
5483	return 0;
5484}
5485
5486/**
5487 *	ata_host_resume - resume host
5488 *	@host: host to resume
5489 *
5490 *	Resume @host.  Actual operation is performed by port resume.
5491 */
5492void ata_host_resume(struct ata_host *host)
5493{
5494	host->dev->power.power_state = PMSG_ON;
5495}
5496#endif
5497
5498struct device_type ata_port_type = {
5499	.name = "ata_port",
5500#ifdef CONFIG_PM
5501	.pm = &ata_port_pm_ops,
5502#endif
5503};
5504
5505/**
5506 *	ata_dev_init - Initialize an ata_device structure
5507 *	@dev: Device structure to initialize
5508 *
5509 *	Initialize @dev in preparation for probing.
5510 *
5511 *	LOCKING:
5512 *	Inherited from caller.
5513 */
5514void ata_dev_init(struct ata_device *dev)
5515{
5516	struct ata_link *link = ata_dev_phys_link(dev);
5517	struct ata_port *ap = link->ap;
5518	unsigned long flags;
5519
5520	/* SATA spd limit is bound to the attached device, reset together */
5521	link->sata_spd_limit = link->hw_sata_spd_limit;
5522	link->sata_spd = 0;
5523
5524	/* High bits of dev->flags are used to record warm plug
5525	 * requests which occur asynchronously.  Synchronize using
5526	 * host lock.
5527	 */
5528	spin_lock_irqsave(ap->lock, flags);
5529	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5530	dev->horkage = 0;
5531	spin_unlock_irqrestore(ap->lock, flags);
5532
5533	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5534	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5535	dev->pio_mask = UINT_MAX;
5536	dev->mwdma_mask = UINT_MAX;
5537	dev->udma_mask = UINT_MAX;
5538}
5539
5540/**
5541 *	ata_link_init - Initialize an ata_link structure
5542 *	@ap: ATA port link is attached to
5543 *	@link: Link structure to initialize
5544 *	@pmp: Port multiplier port number
5545 *
5546 *	Initialize @link.
5547 *
5548 *	LOCKING:
5549 *	Kernel thread context (may sleep)
5550 */
5551void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5552{
5553	int i;
5554
5555	/* clear everything except for devices */
5556	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5557	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5558
5559	link->ap = ap;
5560	link->pmp = pmp;
5561	link->active_tag = ATA_TAG_POISON;
5562	link->hw_sata_spd_limit = UINT_MAX;
5563
5564	/* can't use iterator, ap isn't initialized yet */
5565	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5566		struct ata_device *dev = &link->device[i];
5567
5568		dev->link = link;
5569		dev->devno = dev - link->device;
5570#ifdef CONFIG_ATA_ACPI
5571		dev->gtf_filter = ata_acpi_gtf_filter;
5572#endif
5573		ata_dev_init(dev);
5574	}
5575}
5576
5577/**
5578 *	sata_link_init_spd - Initialize link->sata_spd_limit
5579 *	@link: Link to configure sata_spd_limit for
5580 *
5581 *	Initialize @link->[hw_]sata_spd_limit to the currently
5582 *	configured value.
5583 *
5584 *	LOCKING:
5585 *	Kernel thread context (may sleep).
5586 *
5587 *	RETURNS:
5588 *	0 on success, -errno on failure.
5589 */
5590int sata_link_init_spd(struct ata_link *link)
5591{
5592	u8 spd;
5593	int rc;
5594
5595	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5596	if (rc)
5597		return rc;
5598
5599	spd = (link->saved_scontrol >> 4) & 0xf;
5600	if (spd)
5601		link->hw_sata_spd_limit &= (1 << spd) - 1;
5602
5603	ata_force_link_limits(link);
5604
5605	link->sata_spd_limit = link->hw_sata_spd_limit;
5606
5607	return 0;
5608}
5609
5610/**
5611 *	ata_port_alloc - allocate and initialize basic ATA port resources
5612 *	@host: ATA host this allocated port belongs to
5613 *
5614 *	Allocate and initialize basic ATA port resources.
5615 *
5616 *	RETURNS:
 *	Allocated ATA port on success, NULL on failure.
5618 *
5619 *	LOCKING:
5620 *	Inherited from calling layer (may sleep).
5621 */
5622struct ata_port *ata_port_alloc(struct ata_host *host)
5623{
5624	struct ata_port *ap;
5625
5626	DPRINTK("ENTER\n");
5627
5628	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5629	if (!ap)
5630		return NULL;
5631
5632	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5633	ap->lock = &host->lock;
5634	ap->print_id = -1;
5635	ap->local_port_no = -1;
5636	ap->host = host;
5637	ap->dev = host->dev;
5638
5639#if defined(ATA_VERBOSE_DEBUG)
5640	/* turn on all debugging levels */
5641	ap->msg_enable = 0x00FF;
5642#elif defined(ATA_DEBUG)
5643	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5644#else
5645	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5646#endif
5647
5648	mutex_init(&ap->scsi_scan_mutex);
5649	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5650	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5651	INIT_LIST_HEAD(&ap->eh_done_q);
5652	init_waitqueue_head(&ap->eh_wait_q);
5653	init_completion(&ap->park_req_pending);
5654	init_timer_deferrable(&ap->fastdrain_timer);
5655	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5656	ap->fastdrain_timer.data = (unsigned long)ap;
5657
5658	ap->cbl = ATA_CBL_NONE;
5659
5660	ata_link_init(ap, &ap->link, 0);
5661
5662#ifdef ATA_IRQ_TRAP
5663	ap->stats.unhandled_irq = 1;
5664	ap->stats.idle_irq = 1;
5665#endif
5666	ata_sff_port_init(ap);
5667
5668	return ap;
5669}
5670
5671static void ata_host_release(struct device *gendev, void *res)
5672{
5673	struct ata_host *host = dev_get_drvdata(gendev);
5674	int i;
5675
5676	for (i = 0; i < host->n_ports; i++) {
5677		struct ata_port *ap = host->ports[i];
5678
5679		if (!ap)
5680			continue;
5681
5682		if (ap->scsi_host)
5683			scsi_host_put(ap->scsi_host);
5684
5685		kfree(ap->pmp_link);
5686		kfree(ap->slave_link);
5687		kfree(ap);
5688		host->ports[i] = NULL;
5689	}
5690
5691	dev_set_drvdata(gendev, NULL);
5692}
5693
5694/**
5695 *	ata_host_alloc - allocate and init basic ATA host resources
5696 *	@dev: generic device this host is associated with
5697 *	@max_ports: maximum number of ATA ports associated with this host
5698 *
 *	Allocate and initialize basic ATA host resources.  An LLD calls
 *	this function to allocate a host, fully initializes it, and then
 *	attaches it using ata_host_register().
5702 *
5703 *	@max_ports ports are allocated and host->n_ports is
5704 *	initialized to @max_ports.  The caller is allowed to decrease
5705 *	host->n_ports before calling ata_host_register().  The unused
5706 *	ports will be automatically freed on registration.
5707 *
5708 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
5710 *
5711 *	LOCKING:
5712 *	Inherited from calling layer (may sleep).
5713 */
5714struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5715{
5716	struct ata_host *host;
5717	size_t sz;
5718	int i;
5719
5720	DPRINTK("ENTER\n");
5721
5722	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5723		return NULL;
5724
5725	/* alloc a container for our list of ATA ports (buses) */
5726	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5728	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5729	if (!host)
5730		goto err_out;
5731
5732	devres_add(dev, host);
5733	dev_set_drvdata(dev, host);
5734
5735	spin_lock_init(&host->lock);
5736	mutex_init(&host->eh_mutex);
5737	host->dev = dev;
5738	host->n_ports = max_ports;
5739
5740	/* allocate ports bound to this host */
5741	for (i = 0; i < max_ports; i++) {
5742		struct ata_port *ap;
5743
5744		ap = ata_port_alloc(host);
5745		if (!ap)
5746			goto err_out;
5747
5748		ap->port_no = i;
5749		host->ports[i] = ap;
5750	}
5751
5752	devres_remove_group(dev, NULL);
5753	return host;
5754
5755 err_out:
5756	devres_release_group(dev, NULL);
5757	return NULL;
5758}
5759
5760/**
5761 *	ata_host_alloc_pinfo - alloc host and init with port_info array
5762 *	@dev: generic device this host is associated with
5763 *	@ppi: array of ATA port_info to initialize host with
5764 *	@n_ports: number of ATA ports attached to this host
5765 *
5766 *	Allocate ATA host and initialize with info from @ppi.  If NULL
5767 *	terminated, @ppi may contain fewer entries than @n_ports.  The
5768 *	last entry will be used for the remaining ports.
5769 *
5770 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
5772 *
5773 *	LOCKING:
5774 *	Inherited from calling layer (may sleep).
5775 */
5776struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5777				      const struct ata_port_info * const * ppi,
5778				      int n_ports)
5779{
5780	const struct ata_port_info *pi;
5781	struct ata_host *host;
5782	int i, j;
5783
5784	host = ata_host_alloc(dev, n_ports);
5785	if (!host)
5786		return NULL;
5787
5788	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5789		struct ata_port *ap = host->ports[i];
5790
5791		if (ppi[j])
5792			pi = ppi[j++];
5793
5794		ap->pio_mask = pi->pio_mask;
5795		ap->mwdma_mask = pi->mwdma_mask;
5796		ap->udma_mask = pi->udma_mask;
5797		ap->flags |= pi->flags;
5798		ap->link.flags |= pi->link_flags;
5799		ap->ops = pi->port_ops;
5800
5801		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5802			host->ops = pi->port_ops;
5803	}
5804
5805	return host;
5806}
5807
5808/**
5809 *	ata_slave_link_init - initialize slave link
5810 *	@ap: port to initialize slave link for
5811 *
5812 *	Create and initialize slave link for @ap.  This enables slave
5813 *	link handling on the port.
5814 *
5815 *	In libata, a port contains links and a link contains devices.
 *	There is a single host link, but if a PMP is attached to it,
 *	there can be multiple fan-out links.  On SATA, there's usually
 *	a single device connected to a link, but PATA and SATA
 *	controllers emulating a TF based interface can have two - master
5820 *	and slave.
5821 *
5822 *	However, there are a few controllers which don't fit into this
5823 *	abstraction too well - SATA controllers which emulate TF
5824 *	interface with both master and slave devices but also have
5825 *	separate SCR register sets for each device.  These controllers
5826 *	need separate links for physical link handling
5827 *	(e.g. onlineness, link speed) but should be treated like a
5828 *	traditional M/S controller for everything else (e.g. command
5829 *	issue, softreset).
5830 *
5831 *	slave_link is libata's way of handling this class of
5832 *	controllers without impacting core layer too much.  For
5833 *	anything other than physical link handling, the default host
5834 *	link is used for both master and slave.  For physical link
5835 *	handling, separate @ap->slave_link is used.  All dirty details
5836 *	are implemented inside libata core layer.  From LLD's POV, the
5837 *	only difference is that prereset, hardreset and postreset are
5838 *	called once more for the slave link, so the reset sequence
5839 *	looks like the following.
5840 *
5841 *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5842 *	softreset(M) -> postreset(M) -> postreset(S)
5843 *
5844 *	Note that softreset is called only for the master.  Softreset
5845 *	resets both M/S by definition, so SRST on master should handle
5846 *	both (the standard method will work just fine).
5847 *
5848 *	LOCKING:
5849 *	Should be called before host is registered.
5850 *
5851 *	RETURNS:
5852 *	0 on success, -errno on failure.
5853 */
5854int ata_slave_link_init(struct ata_port *ap)
5855{
5856	struct ata_link *link;
5857
5858	WARN_ON(ap->slave_link);
5859	WARN_ON(ap->flags & ATA_FLAG_PMP);
5860
5861	link = kzalloc(sizeof(*link), GFP_KERNEL);
5862	if (!link)
5863		return -ENOMEM;
5864
5865	ata_link_init(ap, link, 1);
5866	ap->slave_link = link;
5867	return 0;
5868}
5869
5870static void ata_host_stop(struct device *gendev, void *res)
5871{
5872	struct ata_host *host = dev_get_drvdata(gendev);
5873	int i;
5874
5875	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5876
5877	for (i = 0; i < host->n_ports; i++) {
5878		struct ata_port *ap = host->ports[i];
5879
5880		if (ap->ops->port_stop)
5881			ap->ops->port_stop(ap);
5882	}
5883
5884	if (host->ops->host_stop)
5885		host->ops->host_stop(host);
5886}
5887
5888/**
5889 *	ata_finalize_port_ops - finalize ata_port_operations
5890 *	@ops: ata_port_operations to finalize
5891 *
5892 *	An ata_port_operations can inherit from another ops and that
5893 *	ops can again inherit from another.  This can go on as many
5894 *	times as necessary as long as there is no loop in the
5895 *	inheritance chain.
5896 *
5897 *	Ops tables are finalized when the host is started.  NULL or
 *	unspecified entries are inherited from the closest ancestor
5899 *	which has the method and the entry is populated with it.
5900 *	After finalization, the ops table directly points to all the
5901 *	methods and ->inherits is no longer necessary and cleared.
5902 *
5903 *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5904 *
5905 *	LOCKING:
5906 *	None.
5907 */
5908static void ata_finalize_port_ops(struct ata_port_operations *ops)
5909{
5910	static DEFINE_SPINLOCK(lock);
5911	const struct ata_port_operations *cur;
5912	void **begin = (void **)ops;
5913	void **end = (void **)&ops->inherits;
5914	void **pp;
5915
5916	if (!ops || !ops->inherits)
5917		return;
5918
5919	spin_lock(&lock);
5920
5921	for (cur = ops->inherits; cur; cur = cur->inherits) {
5922		void **inherit = (void **)cur;
5923
5924		for (pp = begin; pp < end; pp++, inherit++)
5925			if (!*pp)
5926				*pp = *inherit;
5927	}
5928
5929	for (pp = begin; pp < end; pp++)
5930		if (IS_ERR(*pp))
5931			*pp = NULL;
5932
5933	ops->inherits = NULL;
5934
5935	spin_unlock(&lock);
5936}
5937
5938/**
5939 *	ata_host_start - start and freeze ports of an ATA host
5940 *	@host: ATA host to start ports for
5941 *
5942 *	Start and then freeze ports of @host.  Started status is
5943 *	recorded in host->flags, so this function can be called
5944 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, it's set to the
5946 *	first non-dummy port ops.
5947 *
5948 *	LOCKING:
5949 *	Inherited from calling layer (may sleep).
5950 *
5951 *	RETURNS:
5952 *	0 if all ports are started successfully, -errno otherwise.
5953 */
5954int ata_host_start(struct ata_host *host)
5955{
5956	int have_stop = 0;
5957	void *start_dr = NULL;
5958	int i, rc;
5959
5960	if (host->flags & ATA_HOST_STARTED)
5961		return 0;
5962
5963	ata_finalize_port_ops(host->ops);
5964
5965	for (i = 0; i < host->n_ports; i++) {
5966		struct ata_port *ap = host->ports[i];
5967
5968		ata_finalize_port_ops(ap->ops);
5969
5970		if (!host->ops && !ata_port_is_dummy(ap))
5971			host->ops = ap->ops;
5972
5973		if (ap->ops->port_stop)
5974			have_stop = 1;
5975	}
5976
5977	if (host->ops->host_stop)
5978		have_stop = 1;
5979
5980	if (have_stop) {
5981		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5982		if (!start_dr)
5983			return -ENOMEM;
5984	}
5985
5986	for (i = 0; i < host->n_ports; i++) {
5987		struct ata_port *ap = host->ports[i];
5988
5989		if (ap->ops->port_start) {
5990			rc = ap->ops->port_start(ap);
5991			if (rc) {
5992				if (rc != -ENODEV)
5993					dev_err(host->dev,
5994						"failed to start port %d (errno=%d)\n",
5995						i, rc);
5996				goto err_out;
5997			}
5998		}
5999		ata_eh_freeze_port(ap);
6000	}
6001
6002	if (start_dr)
6003		devres_add(host->dev, start_dr);
6004	host->flags |= ATA_HOST_STARTED;
6005	return 0;
6006
6007 err_out:
6008	while (--i >= 0) {
6009		struct ata_port *ap = host->ports[i];
6010
6011		if (ap->ops->port_stop)
6012			ap->ops->port_stop(ap);
6013	}
6014	devres_free(start_dr);
6015	return rc;
6016}
6017
6018/**
 *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
 *	@host:	host to initialize
 *	@dev:	device host is attached to
 *	@ops:	port operations to use
 */
6025void ata_host_init(struct ata_host *host, struct device *dev,
6026		   struct ata_port_operations *ops)
6027{
6028	spin_lock_init(&host->lock);
6029	mutex_init(&host->eh_mutex);
6030	host->n_tags = ATA_MAX_QUEUE - 1;
6031	host->dev = dev;
6032	host->ops = ops;
6033}
6034
6035void __ata_port_probe(struct ata_port *ap)
6036{
6037	struct ata_eh_info *ehi = &ap->link.eh_info;
6038	unsigned long flags;
6039
6040	/* kick EH for boot probing */
6041	spin_lock_irqsave(ap->lock, flags);
6042
6043	ehi->probe_mask |= ATA_ALL_DEVICES;
6044	ehi->action |= ATA_EH_RESET;
6045	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6046
6047	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6048	ap->pflags |= ATA_PFLAG_LOADING;
6049	ata_port_schedule_eh(ap);
6050
6051	spin_unlock_irqrestore(ap->lock, flags);
6052}
6053
6054int ata_port_probe(struct ata_port *ap)
6055{
6056	int rc = 0;
6057
6058	if (ap->ops->error_handler) {
6059		__ata_port_probe(ap);
6060		ata_port_wait_eh(ap);
6061	} else {
6062		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6063		rc = ata_bus_probe(ap);
6064		DPRINTK("ata%u: bus probe end\n", ap->print_id);
6065	}
6066	return rc;
6067}
6068
6070static void async_port_probe(void *data, async_cookie_t cookie)
6071{
6072	struct ata_port *ap = data;
6073
6074	/*
6075	 * If we're not allowed to scan this host in parallel,
6076	 * we need to wait until all previous scans have completed
6077	 * before going further.
6078	 * Jeff Garzik says this is only within a controller, so we
6079	 * don't need to wait for port 0, only for later ports.
6080	 */
6081	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6082		async_synchronize_cookie(cookie);
6083
6084	(void)ata_port_probe(ap);
6085
6086	/* in order to keep device order, we need to synchronize at this point */
6087	async_synchronize_cookie(cookie);
6088
6089	ata_scsi_scan_host(ap, 1);
6090}
6091
6092/**
6093 *	ata_host_register - register initialized ATA host
6094 *	@host: ATA host to register
6095 *	@sht: template for SCSI host
6096 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by the LLD.  This
 *	function registers @host with the ATA and SCSI layers and
 *	probes the registered devices; the host must already have
 *	been started with ata_host_start().
6101 *
6102 *	LOCKING:
6103 *	Inherited from calling layer (may sleep).
6104 *
6105 *	RETURNS:
6106 *	0 on success, -errno otherwise.
6107 */
6108int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6109{
6110	int i, rc;
6111
6112	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
6113
6114	/* host must have been started */
6115	if (!(host->flags & ATA_HOST_STARTED)) {
6116		dev_err(host->dev, "BUG: trying to register unstarted host\n");
6117		WARN_ON(1);
6118		return -EINVAL;
6119	}
6120
6121	/* Blow away unused ports.  This happens when LLD can't
6122	 * determine the exact number of ports to allocate at
6123	 * allocation time.
6124	 */
6125	for (i = host->n_ports; host->ports[i]; i++)
6126		kfree(host->ports[i]);
6127
6128	/* give ports names and add SCSI hosts */
6129	for (i = 0; i < host->n_ports; i++) {
6130		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6131		host->ports[i]->local_port_no = i + 1;
6132	}
6133
6134	/* Create associated sysfs transport objects  */
6135	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev, host->ports[i]);
		if (rc)
			goto err_tadd;
6140	}
6141
6142	rc = ata_scsi_add_hosts(host, sht);
6143	if (rc)
6144		goto err_tadd;
6145
6146	/* set cable, sata_spd_limit and report */
6147	for (i = 0; i < host->n_ports; i++) {
6148		struct ata_port *ap = host->ports[i];
6149		unsigned long xfer_mask;
6150
6151		/* set SATA cable type if still unset */
6152		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6153			ap->cbl = ATA_CBL_SATA;
6154
6155		/* init sata_spd_limit to the current value */
6156		sata_link_init_spd(&ap->link);
6157		if (ap->slave_link)
6158			sata_link_init_spd(ap->slave_link);
6159
6160		/* print per-port info to dmesg */
6161		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6162					      ap->udma_mask);
6163
6164		if (!ata_port_is_dummy(ap)) {
6165			ata_port_info(ap, "%cATA max %s %s\n",
6166				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6167				      ata_mode_string(xfer_mask),
6168				      ap->link.eh_info.desc);
6169			ata_ehi_clear_desc(&ap->link.eh_info);
6170		} else
6171			ata_port_info(ap, "DUMMY\n");
6172	}
6173
6174	/* perform each probe asynchronously */
6175	for (i = 0; i < host->n_ports; i++) {
6176		struct ata_port *ap = host->ports[i];
6177		async_schedule(async_port_probe, ap);
6178	}
6179
6180	return 0;
6181
 err_tadd:
	while (--i >= 0)
		ata_tport_delete(host->ports[i]);
	return rc;
}
6189
6190/**
6191 *	ata_host_activate - start host, request IRQ and register it
6192 *	@host: target ATA host
6193 *	@irq: IRQ to request
6194 *	@irq_handler: irq_handler used when requesting IRQ
6195 *	@irq_flags: irq_flags used when requesting IRQ
6196 *	@sht: scsi_host_template to use when registering the host
6197 *
6198 *	After allocating an ATA host and initializing it, most libata
6199 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes necessary
6201 *	arguments and performs the three steps in one go.
6202 *
 *	A zero @irq skips the IRQ registration and expects the host to
 *	have set polling mode on the port.  In this case, @irq_handler
 *	should be NULL.
6206 *
6207 *	LOCKING:
6208 *	Inherited from calling layer (may sleep).
6209 *
6210 *	RETURNS:
6211 *	0 on success, -errno otherwise.
6212 */
6213int ata_host_activate(struct ata_host *host, int irq,
6214		      irq_handler_t irq_handler, unsigned long irq_flags,
6215		      struct scsi_host_template *sht)
6216{
6217	int i, rc;
6218
6219	rc = ata_host_start(host);
6220	if (rc)
6221		return rc;
6222
6223	/* Special case for polling mode */
6224	if (!irq) {
6225		WARN_ON(irq_handler);
6226		return ata_host_register(host, sht);
6227	}
6228
6229	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6230			      dev_name(host->dev), host);
6231	if (rc)
6232		return rc;
6233
6234	for (i = 0; i < host->n_ports; i++)
6235		ata_port_desc(host->ports[i], "irq %d", irq);
6236
6237	rc = ata_host_register(host, sht);
6238	/* if failed, just free the IRQ and leave ports alone */
6239	if (rc)
6240		devm_free_irq(host->dev, irq, host);
6241
6242	return rc;
6243}
6244
6245/**
 *	ata_port_detach - Detach ATA port in preparation for device removal
6247 *	@ap: ATA port to be detached
6248 *
6249 *	Detach all ATA devices and the associated SCSI devices of @ap;
6250 *	then, remove the associated SCSI host.  @ap is guaranteed to
6251 *	be quiescent on return from this function.
6252 *
6253 *	LOCKING:
6254 *	Kernel thread context (may sleep).
6255 */
6256static void ata_port_detach(struct ata_port *ap)
6257{
6258	unsigned long flags;
6259	struct ata_link *link;
6260	struct ata_device *dev;
6261
6262	if (!ap->ops->error_handler)
6263		goto skip_eh;
6264
6265	/* tell EH we're leaving & flush EH */
6266	spin_lock_irqsave(ap->lock, flags);
6267	ap->pflags |= ATA_PFLAG_UNLOADING;
6268	ata_port_schedule_eh(ap);
6269	spin_unlock_irqrestore(ap->lock, flags);
6270
6271	/* wait till EH commits suicide */
6272	ata_port_wait_eh(ap);
6273
6274	/* it better be dead now */
6275	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6276
6277	cancel_delayed_work_sync(&ap->hotplug_task);
6278
6279 skip_eh:
6280	/* clean up zpodd on port removal */
6281	ata_for_each_link(link, ap, HOST_FIRST) {
6282		ata_for_each_dev(dev, link, ALL) {
6283			if (zpodd_dev_enabled(dev))
6284				zpodd_exit(dev);
6285		}
6286	}
6287	if (ap->pmp_link) {
6288		int i;
6289		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6290			ata_tlink_delete(&ap->pmp_link[i]);
6291	}
6292	/* remove the associated SCSI host */
6293	scsi_remove_host(ap->scsi_host);
6294	ata_tport_delete(ap);
6295}
6296
6297/**
6298 *	ata_host_detach - Detach all ports of an ATA host
6299 *	@host: Host to detach
6300 *
6301 *	Detach all ports of @host.
6302 *
6303 *	LOCKING:
6304 *	Kernel thread context (may sleep).
6305 */
6306void ata_host_detach(struct ata_host *host)
6307{
6308	int i;
6309
6310	for (i = 0; i < host->n_ports; i++)
6311		ata_port_detach(host->ports[i]);
6312
6313	/* the host is dead now, dissociate ACPI */
6314	ata_acpi_dissociate(host);
6315}
6316
6317#ifdef CONFIG_PCI
6318
6319/**
6320 *	ata_pci_remove_one - PCI layer callback for device removal
6321 *	@pdev: PCI device that was removed
6322 *
 *	The PCI layer indicates to libata via this hook that a hot-unplug
 *	or module unload event has occurred.  Detach all ports.  Resource
6325 *	release is handled via devres.
6326 *
6327 *	LOCKING:
6328 *	Inherited from PCI layer (may sleep).
6329 */
6330void ata_pci_remove_one(struct pci_dev *pdev)
6331{
6332	struct ata_host *host = pci_get_drvdata(pdev);
6333
6334	ata_host_detach(host);
6335}
6336
6337/* move to PCI subsystem */
6338int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6339{
6340	unsigned long tmp = 0;
6341
6342	switch (bits->width) {
6343	case 1: {
6344		u8 tmp8 = 0;
6345		pci_read_config_byte(pdev, bits->reg, &tmp8);
6346		tmp = tmp8;
6347		break;
6348	}
6349	case 2: {
6350		u16 tmp16 = 0;
6351		pci_read_config_word(pdev, bits->reg, &tmp16);
6352		tmp = tmp16;
6353		break;
6354	}
6355	case 4: {
6356		u32 tmp32 = 0;
6357		pci_read_config_dword(pdev, bits->reg, &tmp32);
6358		tmp = tmp32;
6359		break;
6360	}
6361
6362	default:
6363		return -EINVAL;
6364	}
6365
6366	tmp &= bits->mask;
6367
6368	return (tmp == bits->val) ? 1 : 0;
6369}
6370
6371#ifdef CONFIG_PM
6372void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6373{
6374	pci_save_state(pdev);
6375	pci_disable_device(pdev);
6376
6377	if (mesg.event & PM_EVENT_SLEEP)
6378		pci_set_power_state(pdev, PCI_D3hot);
6379}
6380
6381int ata_pci_device_do_resume(struct pci_dev *pdev)
6382{
6383	int rc;
6384
6385	pci_set_power_state(pdev, PCI_D0);
6386	pci_restore_state(pdev);
6387
6388	rc = pcim_enable_device(pdev);
6389	if (rc) {
6390		dev_err(&pdev->dev,
6391			"failed to enable device after resume (%d)\n", rc);
6392		return rc;
6393	}
6394
6395	pci_set_master(pdev);
6396	return 0;
6397}
6398
6399int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6400{
6401	struct ata_host *host = pci_get_drvdata(pdev);
6402	int rc = 0;
6403
6404	rc = ata_host_suspend(host, mesg);
6405	if (rc)
6406		return rc;
6407
6408	ata_pci_device_do_suspend(pdev, mesg);
6409
6410	return 0;
6411}
6412
6413int ata_pci_device_resume(struct pci_dev *pdev)
6414{
6415	struct ata_host *host = pci_get_drvdata(pdev);
6416	int rc;
6417
6418	rc = ata_pci_device_do_resume(pdev);
6419	if (rc == 0)
6420		ata_host_resume(host);
6421	return rc;
6422}
6423#endif /* CONFIG_PM */
6424
6425#endif /* CONFIG_PCI */
6426
6427/**
6428 *	ata_platform_remove_one - Platform layer callback for device removal
6429 *	@pdev: Platform device that was removed
6430 *
 *	The platform layer indicates to libata via this hook that a
 *	hot-unplug or module unload event has occurred.  Detach all
 *	ports.  Resource
6433 *	release is handled via devres.
6434 *
6435 *	LOCKING:
6436 *	Inherited from platform layer (may sleep).
6437 */
6438int ata_platform_remove_one(struct platform_device *pdev)
6439{
6440	struct ata_host *host = platform_get_drvdata(pdev);
6441
6442	ata_host_detach(host);
6443
6444	return 0;
6445}
6446
6447static int __init ata_parse_force_one(char **cur,
6448				      struct ata_force_ent *force_ent,
6449				      const char **reason)
6450{
6451	/* FIXME: Currently, there's no way to tag init const data and
6452	 * using __initdata causes build failure on some versions of
6453	 * gcc.  Once __initdataconst is implemented, add const to the
6454	 * following structure.
6455	 */
6456	static struct ata_force_param force_tbl[] __initdata = {
6457		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6458		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6459		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6460		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6461		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6462		{ "sata",	.cbl		= ATA_CBL_SATA },
6463		{ "1.5Gbps",	.spd_limit	= 1 },
6464		{ "3.0Gbps",	.spd_limit	= 2 },
6465		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6466		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6467		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
6468		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6469		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6470		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6471		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6472		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6473		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6474		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6475		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6476		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6477		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6478		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6479		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6480		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6481		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6482		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6483		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6484		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6485		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6486		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6487		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6488		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6489		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6490		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6491		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6492		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6493		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6494		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6495		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6496		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6497		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6498		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6499		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6500		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6501		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6502		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6503		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6504		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6505		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
6506		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
6507		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },
6508	};
6509	char *start = *cur, *p = *cur;
6510	char *id, *val, *endp;
6511	const struct ata_force_param *match_fp = NULL;
6512	int nr_matches = 0, i;
6513
6514	/* find where this param ends and update *cur */
6515	while (*p != '\0' && *p != ',')
6516		p++;
6517
6518	if (*p == '\0')
6519		*cur = p;
6520	else
6521		*cur = p + 1;
6522
6523	*p = '\0';
6524
6525	/* parse */
6526	p = strchr(start, ':');
6527	if (!p) {
6528		val = strstrip(start);
6529		goto parse_val;
6530	}
6531	*p = '\0';
6532
6533	id = strstrip(start);
6534	val = strstrip(p + 1);
6535
6536	/* parse id */
6537	p = strchr(id, '.');
6538	if (p) {
6539		*p++ = '\0';
6540		force_ent->device = simple_strtoul(p, &endp, 10);
6541		if (p == endp || *endp != '\0') {
6542			*reason = "invalid device";
6543			return -EINVAL;
6544		}
6545	}
6546
6547	force_ent->port = simple_strtoul(id, &endp, 10);
	if (id == endp || *endp != '\0') {
6549		*reason = "invalid port/link";
6550		return -EINVAL;
6551	}
6552
6553 parse_val:
6554	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6555	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6556		const struct ata_force_param *fp = &force_tbl[i];
6557
6558		if (strncasecmp(val, fp->name, strlen(val)))
6559			continue;
6560
6561		nr_matches++;
6562		match_fp = fp;
6563
6564		if (strcasecmp(val, fp->name) == 0) {
6565			nr_matches = 1;
6566			break;
6567		}
6568	}
6569
6570	if (!nr_matches) {
6571		*reason = "unknown value";
6572		return -EINVAL;
6573	}
6574	if (nr_matches > 1) {
		*reason = "ambiguous value";
6576		return -EINVAL;
6577	}
6578
6579	force_ent->param = *match_fp;
6580
6581	return 0;
6582}
6583
6584static void __init ata_parse_force_param(void)
6585{
6586	int idx = 0, size = 1;
6587	int last_port = -1, last_device = -1;
6588	char *p, *cur, *next;
6589
6590	/* calculate maximum number of params and allocate force_tbl */
6591	for (p = ata_force_param_buf; *p; p++)
6592		if (*p == ',')
6593			size++;
6594
	ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
6596	if (!ata_force_tbl) {
6597		printk(KERN_WARNING "ata: failed to extend force table, "
6598		       "libata.force ignored\n");
6599		return;
6600	}
6601
6602	/* parse and populate the table */
6603	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6604		const char *reason = "";
6605		struct ata_force_ent te = { .port = -1, .device = -1 };
6606
6607		next = cur;
6608		if (ata_parse_force_one(&next, &te, &reason)) {
6609			printk(KERN_WARNING "ata: failed to parse force "
6610			       "parameter \"%s\" (%s)\n",
6611			       cur, reason);
6612			continue;
6613		}
6614
6615		if (te.port == -1) {
6616			te.port = last_port;
6617			te.device = last_device;
6618		}
6619
6620		ata_force_tbl[idx++] = te;
6621
6622		last_port = te.port;
6623		last_device = te.device;
6624	}
6625
6626	ata_force_tbl_size = idx;
6627}
6628
6629static int __init ata_init(void)
6630{
6631	int rc;
6632
6633	ata_parse_force_param();
6634
6635	rc = ata_sff_init();
6636	if (rc) {
6637		kfree(ata_force_tbl);
6638		return rc;
6639	}
6640
6641	libata_transport_init();
6642	ata_scsi_transport_template = ata_attach_transport();
6643	if (!ata_scsi_transport_template) {
6644		ata_sff_exit();
6645		rc = -ENOMEM;
6646		goto err_out;
6647	}
6648
6649	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6650	return 0;
6651
6652err_out:
6653	return rc;
6654}
6655
6656static void __exit ata_exit(void)
6657{
6658	ata_release_transport(ata_scsi_transport_template);
6659	libata_transport_exit();
6660	ata_sff_exit();
6661	kfree(ata_force_tbl);
6662}
6663
6664subsys_initcall(ata_init);
6665module_exit(ata_exit);
6666
6667static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6668
6669int ata_ratelimit(void)
6670{
6671	return __ratelimit(&ratelimit);
6672}
6673
6674/**
6675 *	ata_msleep - ATA EH owner aware msleep
6676 *	@ap: ATA port to attribute the sleep to
6677 *	@msecs: duration to sleep in milliseconds
6678 *
6679 *	Sleeps @msecs.  If the current task is owner of @ap's EH, the
6680 *	ownership is released before going to sleep and reacquired
6681 *	after the sleep is complete.  IOW, other ports sharing the
6682 *	@ap->host will be allowed to own the EH while this task is
6683 *	sleeping.
6684 *
6685 *	LOCKING:
6686 *	Might sleep.
6687 */
6688void ata_msleep(struct ata_port *ap, unsigned int msecs)
6689{
6690	bool owns_eh = ap && ap->host->eh_owner == current;
6691
6692	if (owns_eh)
6693		ata_eh_release(ap);
6694
6695	msleep(msecs);
6696
6697	if (owns_eh)
6698		ata_eh_acquire(ap);
6699}
6700
6701/**
6702 *	ata_wait_register - wait until register value changes
6703 *	@ap: ATA port to wait register for, can be NULL
6704 *	@reg: IO-mapped register
6705 *	@mask: Mask to apply to read register value
6706 *	@val: Wait condition
6707 *	@interval: polling interval in milliseconds
6708 *	@timeout: timeout in milliseconds
6709 *
 *	Waiting for some bits of a register to change is a common
 *	operation for ATA controllers.  This function reads the 32bit
 *	LE IO-mapped register @reg and tests for the following
 *	condition.
 *
 *	(*@reg & @mask) != @val
 *
 *	If the condition is met, it returns; otherwise, the read is
 *	repeated every @interval milliseconds until @timeout expires.
6718 *
6719 *	LOCKING:
6720 *	Kernel thread context (may sleep)
6721 *
6722 *	RETURNS:
6723 *	The final register value.
6724 */
6725u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6726		      unsigned long interval, unsigned long timeout)
6727{
6728	unsigned long deadline;
6729	u32 tmp;
6730
6731	tmp = ioread32(reg);
6732
6733	/* Calculate timeout _after_ the first read to make sure
6734	 * preceding writes reach the controller before starting to
6735	 * eat away the timeout.
6736	 */
6737	deadline = ata_deadline(jiffies, timeout);
6738
6739	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6740		ata_msleep(ap, interval);
6741		tmp = ioread32(reg);
6742	}
6743
6744	return tmp;
6745}
6746
6747/*
6748 * Dummy port_ops
6749 */
6750static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6751{
6752	return AC_ERR_SYSTEM;
6753}
6754
6755static void ata_dummy_error_handler(struct ata_port *ap)
6756{
6757	/* truly dummy */
6758}
6759
6760struct ata_port_operations ata_dummy_port_ops = {
6761	.qc_prep		= ata_noop_qc_prep,
6762	.qc_issue		= ata_dummy_qc_issue,
6763	.error_handler		= ata_dummy_error_handler,
6764	.sched_eh		= ata_std_sched_eh,
6765	.end_eh			= ata_std_end_eh,
6766};
6767
6768const struct ata_port_info ata_dummy_port_info = {
6769	.port_ops		= &ata_dummy_port_ops,
6770};
6771
6772/*
6773 * Utility print functions
6774 */
6775void ata_port_printk(const struct ata_port *ap, const char *level,
6776		     const char *fmt, ...)
6777{
6778	struct va_format vaf;
6779	va_list args;
6780
6781	va_start(args, fmt);
6782
6783	vaf.fmt = fmt;
6784	vaf.va = &args;
6785
6786	printk("%sata%u: %pV", level, ap->print_id, &vaf);
6787
6788	va_end(args);
6789}
6790EXPORT_SYMBOL(ata_port_printk);
6791
6792void ata_link_printk(const struct ata_link *link, const char *level,
6793		     const char *fmt, ...)
6794{
6795	struct va_format vaf;
6796	va_list args;
6797
6798	va_start(args, fmt);
6799
6800	vaf.fmt = fmt;
6801	vaf.va = &args;
6802
6803	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6804		printk("%sata%u.%02u: %pV",
6805		       level, link->ap->print_id, link->pmp, &vaf);
6806	else
6807		printk("%sata%u: %pV",
6808		       level, link->ap->print_id, &vaf);
6809
6810	va_end(args);
6811}
6812EXPORT_SYMBOL(ata_link_printk);
6813
6814void ata_dev_printk(const struct ata_device *dev, const char *level,
6815		    const char *fmt, ...)
6816{
6817	struct va_format vaf;
6818	va_list args;
6819
6820	va_start(args, fmt);
6821
6822	vaf.fmt = fmt;
6823	vaf.va = &args;
6824
6825	printk("%sata%u.%02u: %pV",
6826	       level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6827	       &vaf);
6828
6829	va_end(args);
6830}
6831EXPORT_SYMBOL(ata_dev_printk);
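
/*
 * Illustrative sketch only: LLDs normally reach these helpers through
 * the ata_port_xxx(), ata_link_xxx() and ata_dev_xxx() wrapper macros
 * from <linux/libata.h> rather than calling the printk variants
 * directly.  The messages below are made up for the example.
 */
static void __maybe_unused example_report(struct ata_device *dev)
{
	ata_dev_info(dev, "configured for UDMA/133\n");
	ata_link_warn(dev->link, "link is slow to respond\n");
	ata_port_err(dev->link->ap, "COMRESET failed\n");
}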
6832
6833void ata_print_version(const struct device *dev, const char *version)
6834{
6835	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6836}
6837EXPORT_SYMBOL(ata_print_version);
6838
6839/*
6840 * libata is essentially a library of internal helper functions for
6841 * low-level ATA host controller drivers.  As such, the API/ABI is
6842 * likely to change as new drivers are added and updated.
6843 * Do not depend on ABI/API stability.
6844 */
6845EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6846EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6847EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6848EXPORT_SYMBOL_GPL(ata_base_port_ops);
6849EXPORT_SYMBOL_GPL(sata_port_ops);
6850EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6851EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6852EXPORT_SYMBOL_GPL(ata_link_next);
6853EXPORT_SYMBOL_GPL(ata_dev_next);
6854EXPORT_SYMBOL_GPL(ata_std_bios_param);
6855EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6856EXPORT_SYMBOL_GPL(ata_host_init);
6857EXPORT_SYMBOL_GPL(ata_host_alloc);
6858EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6859EXPORT_SYMBOL_GPL(ata_slave_link_init);
6860EXPORT_SYMBOL_GPL(ata_host_start);
6861EXPORT_SYMBOL_GPL(ata_host_register);
6862EXPORT_SYMBOL_GPL(ata_host_activate);
6863EXPORT_SYMBOL_GPL(ata_host_detach);
6864EXPORT_SYMBOL_GPL(ata_sg_init);
6865EXPORT_SYMBOL_GPL(ata_qc_complete);
6866EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6867EXPORT_SYMBOL_GPL(atapi_cmd_type);
6868EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6869EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6870EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6871EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6872EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6873EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6874EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6875EXPORT_SYMBOL_GPL(ata_mode_string);
6876EXPORT_SYMBOL_GPL(ata_id_xfermask);
6877EXPORT_SYMBOL_GPL(ata_do_set_mode);
6878EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6879EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6880EXPORT_SYMBOL_GPL(ata_dev_disable);
6881EXPORT_SYMBOL_GPL(sata_set_spd);
6882EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6883EXPORT_SYMBOL_GPL(sata_link_debounce);
6884EXPORT_SYMBOL_GPL(sata_link_resume);
6885EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
6886EXPORT_SYMBOL_GPL(ata_std_prereset);
6887EXPORT_SYMBOL_GPL(sata_link_hardreset);
6888EXPORT_SYMBOL_GPL(sata_std_hardreset);
6889EXPORT_SYMBOL_GPL(ata_std_postreset);
6890EXPORT_SYMBOL_GPL(ata_dev_classify);
6891EXPORT_SYMBOL_GPL(ata_dev_pair);
6892EXPORT_SYMBOL_GPL(ata_ratelimit);
6893EXPORT_SYMBOL_GPL(ata_msleep);
6894EXPORT_SYMBOL_GPL(ata_wait_register);
6895EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6896EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6897EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6898EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6899EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
6900EXPORT_SYMBOL_GPL(sata_scr_valid);
6901EXPORT_SYMBOL_GPL(sata_scr_read);
6902EXPORT_SYMBOL_GPL(sata_scr_write);
6903EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6904EXPORT_SYMBOL_GPL(ata_link_online);
6905EXPORT_SYMBOL_GPL(ata_link_offline);
6906#ifdef CONFIG_PM
6907EXPORT_SYMBOL_GPL(ata_host_suspend);
6908EXPORT_SYMBOL_GPL(ata_host_resume);
6909#endif /* CONFIG_PM */
6910EXPORT_SYMBOL_GPL(ata_id_string);
6911EXPORT_SYMBOL_GPL(ata_id_c_string);
6912EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6913EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6914
6915EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6916EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6917EXPORT_SYMBOL_GPL(ata_timing_compute);
6918EXPORT_SYMBOL_GPL(ata_timing_merge);
6919EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6920
6921#ifdef CONFIG_PCI
6922EXPORT_SYMBOL_GPL(pci_test_config_bits);
6923EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6924#ifdef CONFIG_PM
6925EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6926EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6927EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6928EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6929#endif /* CONFIG_PM */
6930#endif /* CONFIG_PCI */
6931
6932EXPORT_SYMBOL_GPL(ata_platform_remove_one);
6933
6934EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6935EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6936EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6937EXPORT_SYMBOL_GPL(ata_port_desc);
6938#ifdef CONFIG_PCI
6939EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6940#endif /* CONFIG_PCI */
6941EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6942EXPORT_SYMBOL_GPL(ata_link_abort);
6943EXPORT_SYMBOL_GPL(ata_port_abort);
6944EXPORT_SYMBOL_GPL(ata_port_freeze);
6945EXPORT_SYMBOL_GPL(sata_async_notification);
6946EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6947EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6948EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6949EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6950EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6951EXPORT_SYMBOL_GPL(ata_do_eh);
6952EXPORT_SYMBOL_GPL(ata_std_error_handler);
6953
6954EXPORT_SYMBOL_GPL(ata_cable_40wire);
6955EXPORT_SYMBOL_GPL(ata_cable_80wire);
6956EXPORT_SYMBOL_GPL(ata_cable_unknown);
6957EXPORT_SYMBOL_GPL(ata_cable_ignore);
6958EXPORT_SYMBOL_GPL(ata_cable_sata);
6959