libata.h revision f0d36efdc624beb3d9e29b9ab9e9537bf0f25d5b
1/* 2 * Copyright 2003-2005 Red Hat, Inc. All rights reserved. 3 * Copyright 2003-2005 Jeff Garzik 4 * 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2, or (at your option) 9 * any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; see the file COPYING. If not, write to 18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 19 * 20 * 21 * libata documentation is available via 'make {ps|pdf}docs', 22 * as Documentation/DocBook/libata.* 23 * 24 */ 25 26#ifndef __LINUX_LIBATA_H__ 27#define __LINUX_LIBATA_H__ 28 29#include <linux/delay.h> 30#include <linux/interrupt.h> 31#include <linux/pci.h> 32#include <linux/dma-mapping.h> 33#include <asm/scatterlist.h> 34#include <linux/io.h> 35#include <linux/ata.h> 36#include <linux/workqueue.h> 37#include <scsi/scsi_host.h> 38 39/* 40 * Define if arch has non-standard setup. This is a _PCI_ standard 41 * not a legacy or ISA standard. 
42 */ 43#ifdef CONFIG_ATA_NONSTANDARD 44#include <asm/libata-portmap.h> 45#else 46#include <asm-generic/libata-portmap.h> 47#endif 48 49/* 50 * compile-time options: to be removed as soon as all the drivers are 51 * converted to the new debugging mechanism 52 */ 53#undef ATA_DEBUG /* debugging output */ 54#undef ATA_VERBOSE_DEBUG /* yet more debugging output */ 55#undef ATA_IRQ_TRAP /* define to ack screaming irqs */ 56#undef ATA_NDEBUG /* define to disable quick runtime checks */ 57#define ATA_ENABLE_PATA /* define to enable PATA support in some 58 * low-level drivers */ 59 60 61/* note: prints function name for you */ 62#ifdef ATA_DEBUG 63#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) 64#ifdef ATA_VERBOSE_DEBUG 65#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) 66#else 67#define VPRINTK(fmt, args...) 68#endif /* ATA_VERBOSE_DEBUG */ 69#else 70#define DPRINTK(fmt, args...) 71#define VPRINTK(fmt, args...) 72#endif /* ATA_DEBUG */ 73 74#define BPRINTK(fmt, args...) 
if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) 75 76/* NEW: debug levels */ 77#define HAVE_LIBATA_MSG 1 78 79enum { 80 ATA_MSG_DRV = 0x0001, 81 ATA_MSG_INFO = 0x0002, 82 ATA_MSG_PROBE = 0x0004, 83 ATA_MSG_WARN = 0x0008, 84 ATA_MSG_MALLOC = 0x0010, 85 ATA_MSG_CTL = 0x0020, 86 ATA_MSG_INTR = 0x0040, 87 ATA_MSG_ERR = 0x0080, 88}; 89 90#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV) 91#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO) 92#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE) 93#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN) 94#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC) 95#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL) 96#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR) 97#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR) 98 99static inline u32 ata_msg_init(int dval, int default_msg_enable_bits) 100{ 101 if (dval < 0 || dval >= (sizeof(u32) * 8)) 102 return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */ 103 if (!dval) 104 return 0; 105 return (1 << dval) - 1; 106} 107 108/* defines only for the constants which don't work well as enums */ 109#define ATA_TAG_POISON 0xfafbfcfdU 110 111/* move to PCI layer? 
 */
/* convenience accessor for the generic device embedded in a PCI device */
static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
{
	return &pdev->dev;
}

enum {
	/* various global constants */
	LIBATA_MAX_PRD		= ATA_MAX_PRD / 2,
	ATA_MAX_PORTS		= 8,
	ATA_DEF_QUEUE		= 1,
	/* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
	ATA_MAX_QUEUE		= 32,
	ATA_TAG_INTERNAL	= ATA_MAX_QUEUE - 1,
	ATA_MAX_BUS		= 2,
	ATA_DEF_BUSY_WAIT	= 10000,
	ATA_SHORT_PAUSE		= (HZ >> 6) + 1,

	ATA_SHT_EMULATED	= 1,
	ATA_SHT_CMD_PER_LUN	= 1,
	ATA_SHT_THIS_ID		= -1,
	ATA_SHT_USE_CLUSTERING	= 1,

	/* struct ata_device stuff */
	ATA_DFLAG_LBA		= (1 << 0), /* device supports LBA */
	ATA_DFLAG_LBA48		= (1 << 1), /* device supports LBA48 */
	ATA_DFLAG_CDB_INTR	= (1 << 2), /* device asserts INTRQ when ready for CDB */
	ATA_DFLAG_NCQ		= (1 << 3), /* device supports NCQ */
	ATA_DFLAG_FLUSH_EXT	= (1 << 4), /* do FLUSH_EXT instead of FLUSH */
	ATA_DFLAG_CFG_MASK	= (1 << 8) - 1,

	ATA_DFLAG_PIO		= (1 << 8), /* device limited to PIO mode */
	ATA_DFLAG_NCQ_OFF	= (1 << 9), /* device limited to non-NCQ mode */
	ATA_DFLAG_SUSPENDED	= (1 << 10), /* device suspended */
	ATA_DFLAG_INIT_MASK	= (1 << 16) - 1,

	ATA_DFLAG_DETACH	= (1 << 16),
	ATA_DFLAG_DETACHED	= (1 << 17),

	ATA_DEV_UNKNOWN		= 0,	/* unknown device */
	ATA_DEV_ATA		= 1,	/* ATA device */
	ATA_DEV_ATA_UNSUP	= 2,	/* ATA device (unsupported) */
	ATA_DEV_ATAPI		= 3,	/* ATAPI device */
	ATA_DEV_ATAPI_UNSUP	= 4,	/* ATAPI device (unsupported) */
	ATA_DEV_NONE		= 5,	/* no device */

	/* struct ata_port flags */
	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
					    /* (doesn't imply presence) */
	ATA_FLAG_SATA		= (1 << 1),
	ATA_FLAG_NO_LEGACY	= (1 << 2), /* no legacy mode check */
	ATA_FLAG_MMIO		= (1 << 3), /* use MMIO, not PIO */
	ATA_FLAG_SRST		= (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
	ATA_FLAG_SATA_RESET	= (1 << 5), /* (obsolete) use COMRESET */
	ATA_FLAG_NO_ATAPI	= (1 << 6), /* No ATAPI support */
	ATA_FLAG_PIO_DMA	= (1 << 7), /* PIO cmds via DMA */
	ATA_FLAG_PIO_LBA48	= (1 << 8), /* Host DMA engine is LBA28 only */
	ATA_FLAG_PIO_POLLING	= (1 << 9), /* use polling PIO if LLD
					     * doesn't handle PIO interrupts */
	ATA_FLAG_NCQ		= (1 << 10), /* host supports NCQ */
	ATA_FLAG_HRST_TO_RESUME	= (1 << 11), /* hardreset to resume phy */
	ATA_FLAG_SKIP_D2H_BSY	= (1 << 12), /* can't wait for the first D2H
					      * Register FIS clearing BSY */
	ATA_FLAG_DEBUGMSG	= (1 << 13),
	ATA_FLAG_SETXFER_POLLING = (1 << 14), /* use polling for SETXFER */
	ATA_FLAG_IGN_SIMPLEX	= (1 << 15), /* ignore SIMPLEX */

	/* The following flag belongs to ap->pflags but is kept in
	 * ap->flags because it's referenced in many LLDs and will be
	 * removed in not-too-distant future.
	 */
	ATA_FLAG_DISABLED	= (1 << 23), /* port is disabled, ignore it */

	/* bits 24:31 of ap->flags are reserved for LLD specific flags */

	/* struct ata_port pflags */
	ATA_PFLAG_EH_PENDING	= (1 << 0), /* EH pending */
	ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */
	ATA_PFLAG_FROZEN	= (1 << 2), /* port is frozen */
	ATA_PFLAG_RECOVERED	= (1 << 3), /* recovery action performed */
	ATA_PFLAG_LOADING	= (1 << 4), /* boot/loading probe */
	ATA_PFLAG_UNLOADING	= (1 << 5), /* module is unloading */
	ATA_PFLAG_SCSI_HOTPLUG	= (1 << 6), /* SCSI hotplug scheduled */

	ATA_PFLAG_FLUSH_PORT_TASK = (1 << 16), /* flush port task */
	ATA_PFLAG_SUSPENDED	= (1 << 17), /* port is suspended (power) */
	ATA_PFLAG_PM_PENDING	= (1 << 18), /* PM operation pending */

	/* struct ata_queued_cmd flags */
	ATA_QCFLAG_ACTIVE	= (1 << 0), /* cmd not yet ack'd to SCSI layer */
	ATA_QCFLAG_SG		= (1 << 1), /* have s/g table? */
	ATA_QCFLAG_SINGLE	= (1 << 2), /* no s/g, just a single buffer */
	ATA_QCFLAG_DMAMAP	= ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
	ATA_QCFLAG_IO		= (1 << 3), /* standard IO command */
	ATA_QCFLAG_RESULT_TF	= (1 << 4), /* result TF requested */

	ATA_QCFLAG_FAILED	= (1 << 16), /* cmd failed and is owned by EH */
	ATA_QCFLAG_SENSE_VALID	= (1 << 17), /* sense data valid */
	ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */

	/* host set flags */
	ATA_HOST_SIMPLEX	= (1 << 0),	/* Host is simplex, one DMA channel per host only */

	/* various lengths of time */
	ATA_TMOUT_BOOT		= 30 * HZ,	/* heuristic */
	ATA_TMOUT_BOOT_QUICK	= 7 * HZ,	/* heuristic */
	ATA_TMOUT_INTERNAL	= 30 * HZ,
	ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,

	/* ATA bus states */
	BUS_UNKNOWN		= 0,
	BUS_DMA			= 1,
	BUS_IDLE		= 2,
	BUS_NOINTR		= 3,
	BUS_NODATA		= 4,
	BUS_TIMER		= 5,
	BUS_PIO			= 6,
	BUS_EDD			= 7,
	BUS_IDENTIFY		= 8,
	BUS_PACKET		= 9,

	/* SATA port states */
	PORT_UNKNOWN		= 0,
	PORT_ENABLED		= 1,
	PORT_DISABLED		= 2,

	/* encoding various smaller bitmaps into a single
	 * unsigned int bitmap
	 */
	ATA_BITS_PIO		= 7,
	ATA_BITS_MWDMA		= 5,
	ATA_BITS_UDMA		= 8,

	ATA_SHIFT_PIO		= 0,
	ATA_SHIFT_MWDMA		= ATA_SHIFT_PIO + ATA_BITS_PIO,
	ATA_SHIFT_UDMA		= ATA_SHIFT_MWDMA + ATA_BITS_MWDMA,

	ATA_MASK_PIO		= ((1 << ATA_BITS_PIO) - 1) << ATA_SHIFT_PIO,
	ATA_MASK_MWDMA		= ((1 << ATA_BITS_MWDMA) - 1) << ATA_SHIFT_MWDMA,
	ATA_MASK_UDMA		= ((1 << ATA_BITS_UDMA) - 1) << ATA_SHIFT_UDMA,

	/* size of buffer to pad xfers ending on unaligned boundaries */
	ATA_DMA_PAD_SZ		= 4,
	ATA_DMA_PAD_BUF_SZ	= ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,

	/* masks for port functions */
	ATA_PORT_PRIMARY	= (1 << 0),
	ATA_PORT_SECONDARY	= (1 << 1),

	/* ering size */
	ATA_ERING_SIZE		= 32,

	/* desc_len for ata_eh_info and context */
	ATA_EH_DESC_LEN		= 80,

	/* reset / recovery action types */
	ATA_EH_REVALIDATE	= (1 << 0),
	ATA_EH_SOFTRESET	= (1 << 1),
	ATA_EH_HARDRESET	= (1 << 2),
	ATA_EH_SUSPEND		= (1 << 3),
	ATA_EH_RESUME		= (1 << 4),
	ATA_EH_PM_FREEZE	= (1 << 5),

	ATA_EH_RESET_MASK	= ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
	ATA_EH_PERDEV_MASK	= ATA_EH_REVALIDATE | ATA_EH_SUSPEND |
				  ATA_EH_RESUME | ATA_EH_PM_FREEZE,

	/* ata_eh_info->flags */
	ATA_EHI_HOTPLUGGED	= (1 << 0),  /* could have been hotplugged */
	ATA_EHI_RESUME_LINK	= (1 << 1),  /* resume link (reset modifier) */
	ATA_EHI_NO_AUTOPSY	= (1 << 2),  /* no autopsy */
	ATA_EHI_QUIET		= (1 << 3),  /* be quiet */

	ATA_EHI_DID_RESET	= (1 << 16), /* already reset this port */
	ATA_EHI_PRINTINFO	= (1 << 17), /* print configuration info */
	ATA_EHI_SETMODE		= (1 << 18), /* configure transfer mode */
	ATA_EHI_POST_SETMODE	= (1 << 19), /* revalidating after setmode */

	ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK,

	/* max repeat if error condition is still set after ->error_handler */
	ATA_EH_MAX_REPEAT	= 5,

	/* how hard are we gonna try to probe/recover devices */
	ATA_PROBE_MAX_TRIES	= 3,
	ATA_EH_RESET_TRIES	= 3,
	ATA_EH_DEV_TRIES	= 3,

	/* Drive spinup time (time from power-on to the first D2H FIS)
	 * in msecs - 8s currently.  Failing to get ready in this time
	 * isn't critical.  It will result in reset failure for
	 * controllers which can't wait for the first D2H FIS.  libata
	 * will retry, so it just has to be long enough to spin up
	 * most devices.
	 */
	ATA_SPINUP_WAIT		= 8000,

	/* Horkage types.  May be set by libata or controller on drives
	   (some horkage may be drive/controller pair dependent) */

	ATA_HORKAGE_DIAGNOSTIC	= (1 << 0),	/* Failed boot diag */
	ATA_HORKAGE_NODMA	= (1 << 1),	/* DMA problems */
	ATA_HORKAGE_NONCQ	= (1 << 2),	/* Don't use NCQ */
};

/* states of the PIO host state machine (see libata-core HSM) */
enum hsm_task_states {
	HSM_ST_IDLE,		/* no command ongoing */
	HSM_ST,			/* (waiting the device to) transfer data */
	HSM_ST_LAST,		/* (waiting the device to) complete command */
	HSM_ST_ERR,		/* error */
	HSM_ST_FIRST,		/* (waiting the device to)
				   write CDB or first data block */
};

/* bits for struct ata_queued_cmd.err_mask */
enum ata_completion_errors {
	AC_ERR_DEV		= (1 << 0), /* device reported error */
	AC_ERR_HSM		= (1 << 1), /* host state machine violation */
	AC_ERR_TIMEOUT		= (1 << 2), /* timeout */
	AC_ERR_MEDIA		= (1 << 3), /* media error */
	AC_ERR_ATA_BUS		= (1 << 4), /* ATA bus error */
	AC_ERR_HOST_BUS		= (1 << 5), /* host bus error */
	AC_ERR_SYSTEM		= (1 << 6), /* system error */
	AC_ERR_INVALID		= (1 << 7), /* invalid argument */
	AC_ERR_OTHER		= (1 << 8), /* unknown */
	AC_ERR_NODEV_HINT	= (1 << 9), /* polling device detection hint */
};

/* forward declarations */
struct scsi_device;
struct ata_port_operations;
struct ata_port;
struct ata_queued_cmd;

/* typedefs */
typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
typedef int (*ata_prereset_fn_t)(struct ata_port *ap);
typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes);
typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes);

/* I/O port (or MMIO offset) addresses of the taskfile registers */
struct ata_ioports {
	unsigned long		cmd_addr;
	unsigned long		data_addr;
	unsigned long		error_addr;
	unsigned long		feature_addr;
	unsigned long		nsect_addr;
	unsigned long		lbal_addr;
	unsigned long		lbam_addr;
	unsigned long		lbah_addr;
	unsigned long		device_addr;
	unsigned long		status_addr;
	unsigned long		command_addr;
	unsigned long		altstatus_addr;
	unsigned long		ctl_addr;
	unsigned long		bmdma_addr;
	unsigned long		scr_addr;
};

/* transitional probe-time descriptor, consumed by ata_device_add() */
struct ata_probe_ent {
	struct list_head	node;
	struct device		*dev;
	const struct ata_port_operations *port_ops;
	struct scsi_host_template *sht;
	struct ata_ioports	port[ATA_MAX_PORTS];
	unsigned int		n_ports;
	unsigned int		dummy_port_mask;
	unsigned int		pio_mask;
	unsigned int		mwdma_mask;
	unsigned int		udma_mask;
	unsigned long		irq;
	unsigned long		irq2;
	unsigned int		irq_flags;
	unsigned long		port_flags;
	unsigned long		_host_flags;
	void __iomem		*mmio_base;
	void			*private_data;

	/* port_info for the secondary port.  Together with irq2, it's
	 * used to implement non-uniform secondary port.  Currently,
	 * the only user is ata_piix combined mode.  This workaround
	 * will be removed together with ata_probe_ent when init model
	 * is updated.
	 */
	const struct ata_port_info *pinfo2;
};

/* one controller instance; ports[] is allocated inline after the struct */
struct ata_host {
	spinlock_t		lock;
	struct device		*dev;
	unsigned long		irq;
	unsigned long		irq2;
	void __iomem		*mmio_base;
	unsigned int		n_ports;
	void			*private_data;
	const struct ata_port_operations *ops;
	unsigned long		flags;
	int			simplex_claimed;	/* Keep separate in case we
							   ever need to do this locked */
	struct ata_port		*ports[0];
};

/* one in-flight command, owned by its ata_port's qcmd[] array */
struct ata_queued_cmd {
	struct ata_port		*ap;
	struct ata_device	*dev;

	struct scsi_cmnd	*scsicmd;
	void			(*scsidone)(struct scsi_cmnd *);

	struct ata_taskfile	tf;
	u8			cdb[ATAPI_CDB_LEN];

	unsigned long		flags;		/* ATA_QCFLAG_xxx */
	unsigned int		tag;
	unsigned int		n_elem;
	unsigned int		orig_n_elem;

	int			dma_dir;

	unsigned int		pad_len;

	unsigned int		nbytes;
	unsigned int		curbytes;

	unsigned int		cursg;
	unsigned int		cursg_ofs;

	struct scatterlist	sgent;
	struct scatterlist	pad_sgent;
	void			*buf_virt;

	/* DO NOT iterate over __sg manually, use ata_for_each_sg() */
	struct scatterlist	*__sg;

	unsigned int		err_mask;
	struct ata_taskfile	result_tf;
	ata_qc_cb_t		complete_fn;

	void			*private_data;
};

struct ata_port_stats {
	unsigned long		unhandled_irq;
	unsigned long		idle_irq;
	unsigned long		rw_reqbuf;
};

/* one entry of the per-device error history ring */
struct ata_ering_entry {
	int			is_io;
	unsigned int		err_mask;
	u64			timestamp;
};

struct ata_ering {
	int			cursor;
	struct ata_ering_entry	ring[ATA_ERING_SIZE];
};

/* one ATA/ATAPI device behind a port (master or slave) */
struct ata_device {
	struct ata_port		*ap;
	unsigned int		devno;		/* 0 or 1 */
	unsigned long		flags;		/* ATA_DFLAG_xxx */
	struct scsi_device	*sdev;		/* attached SCSI device */
	/* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */
	u64			n_sectors;	/* size of device, if ATA */
	unsigned int		class;		/* ATA_DEV_xxx */
	u16			id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
	u8			pio_mode;
	u8			dma_mode;
	u8			xfer_mode;
	unsigned int		xfer_shift;	/* ATA_SHIFT_xxx */

	unsigned int		multi_count;	/* sectors count for
						   READ/WRITE MULTIPLE */
	unsigned int		max_sectors;	/* per-device max sectors */
	unsigned int		cdb_len;

	/* per-dev xfer mask */
	unsigned int		pio_mask;
	unsigned int		mwdma_mask;
	unsigned int		udma_mask;

	/* for CHS addressing */
	u16			cylinders;	/* Number of cylinders */
	u16			heads;		/* Number of heads */
	u16			sectors;	/* Number of sectors per track */

	/* error history */
	struct ata_ering	ering;
	unsigned int		horkage;	/* List of broken features */
};

/* Offset into struct ata_device.  Fields above it are maintained
 * across device init.  Fields below are zeroed.
 */
#define ATA_DEVICE_CLEAR_OFFSET		offsetof(struct ata_device, n_sectors)

/* error info filled in at interrupt time, consumed by EH */
struct ata_eh_info {
	struct ata_device	*dev;		/* offending device */
	u32			serror;		/* SError from LLDD */
	unsigned int		err_mask;	/* port-wide err_mask */
	unsigned int		action;		/* ATA_EH_* action mask */
	unsigned int		dev_action[ATA_MAX_DEVICES]; /* dev EH action */
	unsigned int		flags;		/* ATA_EHI_* flags */

	unsigned long		hotplug_timestamp;
	unsigned int		probe_mask;

	char			desc[ATA_EH_DESC_LEN];
	int			desc_len;
};

struct ata_eh_context {
	struct ata_eh_info	i;
	int			tries[ATA_MAX_DEVICES];
	unsigned int		classes[ATA_MAX_DEVICES];
	unsigned int		did_probe_mask;
};

/* one ATA channel (a pair of master/slave devices at most) */
struct ata_port {
	struct Scsi_Host	*scsi_host; /* our co-allocated scsi host */
	const struct ata_port_operations *ops;
	spinlock_t		*lock;
	unsigned long		flags;	/* ATA_FLAG_xxx */
	unsigned int		pflags;	/* ATA_PFLAG_xxx */
	unsigned int		id;	/* unique id req'd by scsi midlyr */
	unsigned int		port_no; /* unique port #; from zero */

	struct ata_prd		*prd;	 /* our SG list */
	dma_addr_t		prd_dma; /* and its DMA mapping */

	void			*pad;	/* array of DMA pad buffers */
	dma_addr_t		pad_dma;

	struct ata_ioports	ioaddr;	/* ATA cmd/ctl/dma register blocks */

	u8			ctl;	/* cache of ATA control register */
	u8			last_ctl;	/* Cache last written value */
	unsigned int		pio_mask;
	unsigned int		mwdma_mask;
	unsigned int		udma_mask;
	unsigned int		cbl;	/* cable type; ATA_CBL_xxx */
	unsigned int		hw_sata_spd_limit;
	unsigned int		sata_spd_limit;	/* SATA PHY speed limit */

	/* record runtime error info, protected by host lock */
	struct ata_eh_info	eh_info;
	/* EH context owned by EH */
	struct ata_eh_context	eh_context;

	struct ata_device	device[ATA_MAX_DEVICES];

	struct ata_queued_cmd	qcmd[ATA_MAX_QUEUE];
	unsigned long		qc_allocated;
	unsigned int		qc_active;

	unsigned int		active_tag;
	u32			sactive;

	struct ata_port_stats	stats;
	struct ata_host		*host;
	struct device		*dev;

	void			*port_task_data;
	struct delayed_work	port_task;
	struct delayed_work	hotplug_task;
	struct work_struct	scsi_rescan_task;

	unsigned int		hsm_task_state;

	u32			msg_enable;
	struct list_head	eh_done_q;
	wait_queue_head_t	eh_wait_q;

	pm_message_t		pm_mesg;
	int			*pm_result;

	void			*private_data;

	u8			sector_buf[ATA_SECT_SIZE]; /* owned by EH */
};

/* per-controller hooks supplied by the low-level driver */
struct ata_port_operations {
	void (*port_disable) (struct ata_port *);

	void (*dev_config) (struct ata_port *, struct ata_device *);

	void (*set_piomode) (struct ata_port *, struct ata_device *);
	void (*set_dmamode) (struct ata_port *, struct ata_device *);
	unsigned long (*mode_filter) (const struct ata_port *, struct ata_device *, unsigned long);

	void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
	void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);

	void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf);
	u8   (*check_status)(struct ata_port *ap);
	u8   (*check_altstatus)(struct ata_port *ap);
	void (*dev_select)(struct ata_port *ap, unsigned int device);

	void (*phy_reset) (struct ata_port *ap); /* obsolete */
	int  (*set_mode) (struct ata_port *ap, struct ata_device **r_failed_dev);

	void (*post_set_mode) (struct ata_port *ap);

	int (*check_atapi_dma) (struct ata_queued_cmd *qc);

	void (*bmdma_setup) (struct ata_queued_cmd *qc);
	void (*bmdma_start) (struct ata_queued_cmd *qc);

	void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);

	void (*qc_prep) (struct ata_queued_cmd *qc);
	unsigned int (*qc_issue) (struct ata_queued_cmd *qc);

	/* Error handlers.  ->error_handler overrides ->eng_timeout and
	 * indicates that new-style EH is in place.
	 */
	void (*eng_timeout) (struct ata_port *ap); /* obsolete */

	void (*freeze) (struct ata_port *ap);
	void (*thaw) (struct ata_port *ap);
	void (*error_handler) (struct ata_port *ap);
	void (*post_internal_cmd) (struct ata_queued_cmd *qc);

	irq_handler_t irq_handler;
	void (*irq_clear) (struct ata_port *);

	u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg);
	void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
			   u32 val);

	int (*port_suspend) (struct ata_port *ap, pm_message_t mesg);
	int (*port_resume) (struct ata_port *ap);

	int (*port_start) (struct ata_port *ap);
	void (*port_stop) (struct ata_port *ap);

	void (*host_stop) (struct ata_host *host);

	void (*bmdma_stop) (struct ata_queued_cmd *qc);
	u8   (*bmdma_status) (struct ata_port *ap);
};

/* static description of one port flavor, used by LLDs at probe time */
struct ata_port_info {
	struct scsi_host_template	*sht;
	unsigned long		flags;
	unsigned long		pio_mask;
	unsigned long		mwdma_mask;
	unsigned long		udma_mask;
	const struct ata_port_operations *port_ops;
	void			*private_data;
};

/* PATA bus timing parameters, all in nanoseconds unless noted */
struct ata_timing {
	unsigned short mode;		/* ATA mode */
	unsigned short setup;		/* t1 */
	unsigned short act8b;		/* t2 for 8-bit I/O */
	unsigned short rec8b;		/* t2i for 8-bit I/O */
	unsigned short cyc8b;		/* t0 for 8-bit I/O */
	unsigned short active;		/* t2 or tD */
	unsigned short recover;		/* t2i or tK */
	unsigned short cycle;		/* t0 */
	unsigned short udma;		/* t2CYCTYP/2 */
};

/* clamp v into [vmin, vmax] using short arithmetic */
#define FIT(v,vmin,vmax)	max_t(short,min_t(short,v,vmax),vmin)

extern const unsigned long sata_deb_timing_normal[];
extern const unsigned long sata_deb_timing_hotplug[];
extern const unsigned long sata_deb_timing_long[];

extern const struct ata_port_operations ata_dummy_port_ops;

/* pick the phy debounce timing table appropriate for this EH context */
static inline const unsigned long *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
	if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
		return sata_deb_timing_hotplug;
	else
		return sata_deb_timing_normal;
}

static inline int ata_port_is_dummy(struct ata_port *ap)
{
	return ap->ops == &ata_dummy_port_ops;
}

extern void ata_port_probe(struct ata_port *);
extern void __sata_phy_reset(struct ata_port *ap);
extern void sata_phy_reset(struct ata_port *ap);
extern void ata_bus_reset(struct ata_port *ap);
extern int sata_set_spd(struct ata_port *ap);
extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param);
extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param);
extern int ata_std_prereset(struct ata_port *ap);
extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
extern int sata_port_hardreset(struct ata_port *ap,
			       const unsigned long *timing);
extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
extern void ata_port_disable(struct ata_port *);
extern void ata_std_ports(struct ata_ioports *ioaddr);
#ifdef CONFIG_PCI
extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
			     unsigned int n_ports);
extern void ata_pci_remove_one (struct pci_dev *pdev);
extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg);
extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev);
extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
extern int ata_pci_device_resume(struct pci_dev *pdev);
extern int ata_pci_clear_simplex(struct pci_dev *pdev);
#endif /* CONFIG_PCI */
extern int ata_device_add(const struct ata_probe_ent *ent);
extern void ata_host_detach(struct ata_host *host);
extern void ata_host_init(struct ata_host *, struct device *,
			  unsigned long, const struct ata_port_operations *);
extern void ata_host_remove(struct ata_host *host);
extern int ata_scsi_detect(struct scsi_host_template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
extern void ata_sas_port_destroy(struct ata_port *);
extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
					   struct ata_port_info *, struct Scsi_Host *);
extern int ata_sas_port_init(struct ata_port *);
extern int ata_sas_port_start(struct ata_port *ap);
extern void ata_sas_port_stop(struct ata_port *ap);
extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
			    struct ata_port *ap);
extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
extern int sata_scr_valid(struct ata_port *ap);
extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
extern int ata_port_online(struct ata_port *ap);
extern int ata_port_offline(struct ata_port *ap);
extern int ata_scsi_device_resume(struct scsi_device *);
extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t mesg);
extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
extern void ata_host_resume(struct ata_host *host);
extern int ata_ratelimit(void);
extern int ata_busy_sleep(struct ata_port *ap,
			  unsigned long timeout_pat, unsigned long timeout);
extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn,
				void *data, unsigned long delay);
extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
			     unsigned long interval_msec,
			     unsigned long timeout_msec);

/*
 * Default driver ops implementations
 */
extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp);
extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device);
extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
extern u8 ata_check_status(struct ata_port *ap);
extern u8 ata_altstatus(struct ata_port *ap);
extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
extern int ata_port_start (struct ata_port *ap);
extern void ata_port_stop (struct ata_port *ap);
extern void ata_host_stop (struct ata_host *host);
extern irqreturn_t ata_interrupt (int irq, void *dev_instance);
extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
			       unsigned int buflen, int write_data);
extern void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
			      unsigned int buflen, int write_data);
extern void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
				    unsigned int buflen, int write_data);
extern void ata_qc_prep(struct ata_queued_cmd *qc);
extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
			    unsigned int buflen);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
			unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
extern void ata_id_string(const u16 *id, unsigned char *s,
			  unsigned int ofs, unsigned int len);
extern void ata_id_c_string(const u16 *id, unsigned char *s,
			    unsigned int ofs, unsigned int len);
extern unsigned long ata_device_blacklisted(const struct ata_device *dev);
extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
extern void ata_bmdma_start (struct ata_queued_cmd *qc);
extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
extern u8   ata_bmdma_status(struct ata_port *ap);
extern void ata_bmdma_irq_clear(struct ata_port *ap);
extern void ata_bmdma_freeze(struct ata_port *ap);
extern void ata_bmdma_thaw(struct ata_port *ap);
extern void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
			       ata_reset_fn_t softreset,
			       ata_reset_fn_t hardreset,
			       ata_postreset_fn_t postreset);
extern void ata_bmdma_error_handler(struct ata_port *ap);
extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
extern int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
			u8 status, int in_wq);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
				    void (*finish_qc)(struct ata_queued_cmd *));
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
			      void (*done)(struct scsi_cmnd *));
extern int ata_std_bios_param(struct scsi_device *sdev,
			      struct block_device *bdev,
			      sector_t capacity, int geom[]);
extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
				       int queue_depth);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);

/*
 * Timing helpers
 */

extern unsigned int ata_pio_need_iordy(const struct ata_device *);
extern int ata_timing_compute(struct ata_device *, unsigned short,
			      struct ata_timing *, int, int);
extern void ata_timing_merge(const struct ata_timing *,
			     const struct ata_timing *, struct ata_timing *,
			     unsigned int);

/* field-selection masks for ata_timing_merge() */
enum {
	ATA_TIMING_SETUP	= (1 << 0),
	ATA_TIMING_ACT8B	= (1 << 1),
	ATA_TIMING_REC8B	= (1 << 2),
	ATA_TIMING_CYC8B	= (1 << 3),
	ATA_TIMING_8BIT		= ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
				  ATA_TIMING_CYC8B,
	ATA_TIMING_ACTIVE	= (1 << 4),
	ATA_TIMING_RECOVER	= (1 << 5),
	ATA_TIMING_CYCLE	= (1 << 6),
	ATA_TIMING_UDMA		= (1 << 7),
	ATA_TIMING_ALL		= ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
				  ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
				  ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
				  ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
};


#ifdef CONFIG_PCI
/* describes one PCI config-space bit test for pci_test_config_bits() */
struct pci_bits {
	unsigned int		reg;	/* PCI config register to read */
	unsigned int		width;	/* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
	unsigned long		mask;
	unsigned long		val;
};

extern void ata_pci_host_stop (struct ata_host *host);
extern struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
#endif /* CONFIG_PCI */

/*
 * EH
 */
extern void ata_eng_timeout(struct ata_port *ap);

extern void ata_port_schedule_eh(struct ata_port *ap);
extern int ata_port_abort(struct ata_port *ap);
extern int ata_port_freeze(struct ata_port *ap);

extern void ata_eh_freeze_port(struct ata_port *ap);
extern void ata_eh_thaw_port(struct ata_port *ap);

extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);

extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
		      ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		      ata_postreset_fn_t postreset);

/*
 * printk helpers
 */
#define ata_port_printk(ap, lv, fmt, args...) \
	printk(lv"ata%u: "fmt, (ap)->id , ##args)

#define ata_dev_printk(dev, lv, fmt, args...) \
	printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)

/*
 * ata_eh_info helpers
 */
/* append formatted text to ehi->desc, truncating at ATA_EH_DESC_LEN */
#define ata_ehi_push_desc(ehi, fmt, args...) do { \
	(ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
				     ATA_EH_DESC_LEN - (ehi)->desc_len, \
				     fmt , ##args); \
} while (0)

#define ata_ehi_clear_desc(ehi) do { \
	(ehi)->desc[0] = '\0'; \
	(ehi)->desc_len = 0; \
} while (0)

/* mark hotplug on this port: schedule softreset and probe of all devices;
 * idempotent — a second call while ATA_EHI_HOTPLUGGED is set is a no-op
 */
static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi)
{
	if (ehi->flags & ATA_EHI_HOTPLUGGED)
		return;

	ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK;
	ehi->hotplug_timestamp = jiffies;

	ehi->action |= ATA_EH_SOFTRESET;
	ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
}

/* as above, but also record an ATA bus error for the event */
static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
{
	__ata_ehi_hotplugged(ehi);
	ehi->err_mask |= AC_ERR_ATA_BUS;
}

/*
 * qc helpers
 */
/* nonzero iff @sg is the final scatterlist entry of @qc (the DMA pad
 * entry counts as last when padding is in use)
 */
static inline int
ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
	if (sg == &qc->pad_sgent)
		return 1;
	if (qc->pad_len)
		return 0;
	if (((sg - qc->__sg) + 1) == qc->n_elem)
		return 1;
	return 0;
}

/* first s/g entry of @qc, or the pad entry alone, or NULL if empty */
static inline struct scatterlist *
ata_qc_first_sg(struct ata_queued_cmd *qc)
{
	if (qc->n_elem)
		return qc->__sg;
	if (qc->pad_len)
		return &qc->pad_sgent;
	return NULL;
}

/* successor of @sg within @qc; the pad entry (if any) comes after the
 * regular table; NULL when iteration is done
 */
static inline struct scatterlist *
ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
	if (sg == &qc->pad_sgent)
		return NULL;
	if (++sg - qc->__sg < qc->n_elem)
		return sg;
	if (qc->pad_len)
		return &qc->pad_sgent;
	return NULL;
}

#define ata_for_each_sg(sg, qc) \
	for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))

static inline unsigned int ata_tag_valid(unsigned int tag)
{
	return (tag < ATA_MAX_QUEUE) ? 1 : 0;
}

/* the last tag is reserved for libata-internal commands */
static inline unsigned int ata_tag_internal(unsigned int tag)
{
	return tag == ATA_MAX_QUEUE - 1;
}

/*
 * device helpers
 */
static inline unsigned int ata_class_enabled(unsigned int class)
{
	return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
}

static inline unsigned int ata_class_disabled(unsigned int class)
{
	return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
}

/* neither enabled nor disabled: ATA_DEV_UNKNOWN / ATA_DEV_NONE */
static inline unsigned int ata_class_absent(unsigned int class)
{
	return !ata_class_enabled(class) && !ata_class_disabled(class);
}

static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
{
	return ata_class_enabled(dev->class);
}

static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
{
	return ata_class_disabled(dev->class);
}

static inline unsigned int ata_dev_absent(const struct ata_device *dev)
{
	return ata_class_absent(dev->class);
}

/* enabled and not suspended for power management */
static inline unsigned int ata_dev_ready(const struct ata_device *dev)
{
	return ata_dev_enabled(dev) && !(dev->flags & ATA_DFLAG_SUSPENDED);
}

/*
 * port helpers
 */
static inline int ata_port_max_devices(const struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_SLAVE_POSS)
		return 2;
	return 1;
}


/* read the Status register via the LLD hook (clears the IRQ condition) */
static inline u8 ata_chk_status(struct ata_port *ap)
{
	return ap->ops->check_status(ap);
}


/**
 *	ata_pause - Flush writes and pause 400 nanoseconds.
 *	@ap: Port to wait for.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline void ata_pause(struct ata_port *ap)
{
	ata_altstatus(ap);
	ndelay(400);
}


/**
 *	ata_busy_wait - Wait for a port status register
 *	@ap: Port to wait for.
1049 * @bits: bits that must be clear 1050 * @max: number of 10uS waits to perform 1051 * 1052 * Waits up to max*10 microseconds for the selected bits in the port's 1053 * status register to be cleared. 1054 * Returns final value of status register. 1055 * 1056 * LOCKING: 1057 * Inherited from caller. 1058 */ 1059 1060static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits, 1061 unsigned int max) 1062{ 1063 u8 status; 1064 1065 do { 1066 udelay(10); 1067 status = ata_chk_status(ap); 1068 max--; 1069 } while (status != 0xff && (status & bits) && (max > 0)); 1070 1071 return status; 1072} 1073 1074 1075/** 1076 * ata_wait_idle - Wait for a port to be idle. 1077 * @ap: Port to wait for. 1078 * 1079 * Waits up to 10ms for port's BUSY and DRQ signals to clear. 1080 * Returns final value of status register. 1081 * 1082 * LOCKING: 1083 * Inherited from caller. 1084 */ 1085 1086static inline u8 ata_wait_idle(struct ata_port *ap) 1087{ 1088 u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); 1089 1090 if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) { 1091 unsigned long l = ap->ioaddr.status_addr; 1092 if (ata_msg_warn(ap)) 1093 printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n", 1094 status, l); 1095 } 1096 1097 return status; 1098} 1099 1100static inline void ata_qc_set_polling(struct ata_queued_cmd *qc) 1101{ 1102 qc->tf.ctl |= ATA_NIEN; 1103} 1104 1105static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap, 1106 unsigned int tag) 1107{ 1108 if (likely(ata_tag_valid(tag))) 1109 return &ap->qcmd[tag]; 1110 return NULL; 1111} 1112 1113static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap, 1114 unsigned int tag) 1115{ 1116 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1117 1118 if (unlikely(!qc) || !ap->ops->error_handler) 1119 return qc; 1120 1121 if ((qc->flags & (ATA_QCFLAG_ACTIVE | 1122 ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE) 1123 return qc; 1124 1125 return NULL; 1126} 1127 
/* Initialize a taskfile for @dev: zeroed, with the port's control value
 * and the obsolete dev/head bits (plus DEV1 for the slave device). */
static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
{
	memset(tf, 0, sizeof(*tf));

	tf->ctl = dev->ap->ctl;
	if (dev->devno == 0)
		tf->device = ATA_DEVICE_OBS;
	else
		tf->device = ATA_DEVICE_OBS | ATA_DEV1;
}

/* Reset a qc's per-command state for reuse; qc->dev and qc->tag are
 * deliberately left untouched. */
static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
{
	qc->dma_dir = DMA_NONE;
	qc->__sg = NULL;
	qc->flags = 0;
	qc->cursg = qc->cursg_ofs = 0;
	qc->nbytes = qc->curbytes = 0;
	qc->n_elem = 0;
	qc->err_mask = 0;
	qc->pad_len = 0;

	ata_tf_init(qc->dev, &qc->tf);

	/* init result_tf such that it indicates normal completion */
	qc->result_tf.command = ATA_DRDY;
	qc->result_tf.feature = 0;
}

/**
 *	ata_irq_ack - Acknowledge a device interrupt.
 *	@ap: Port on which interrupts are enabled.
 *	@chk_drq: if true, also wait for DRQ to clear (not just BUSY)
 *
 *	Wait up to 10 ms for legacy IDE device to become idle (BUSY
 *	or BUSY+DRQ clear).  Obtain dma status and port status from
 *	device.  Clear the interrupt.  Return port status.
 *
 *	LOCKING:
 *	Inherited from caller - TODO confirm; not stated in the
 *	original comment.
 */

static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
{
	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
	u8 host_stat, post_stat, status;

	status = ata_busy_wait(ap, bits, 1000);
	if (status & bits)
		if (ata_msg_err(ap))
			printk(KERN_ERR "abnormal status 0x%X\n", status);

	/* get controller status; clear intr, err bits */
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
		host_stat = readb(mmio + ATA_DMA_STATUS);
		/* INTR/ERR are write-1-to-clear in the BMDMA status reg */
		writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
		       mmio + ATA_DMA_STATUS);

		post_stat = readb(mmio + ATA_DMA_STATUS);
	} else {
		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
		outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
		     ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);

		post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
	}

	if (ata_msg_intr(ap))
		printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
			__FUNCTION__,
			host_stat, post_stat, status);

	return status;
}

/* True if issuing a cache flush to @dev is worthwhile: write cache is
 * enabled or the device advertises FLUSH CACHE (EXT). */
static inline int ata_try_flush_cache(const struct ata_device *dev)
{
	return ata_id_wcache_enabled(dev->id) ||
	       ata_id_has_flush(dev->id) ||
	       ata_id_has_flush_ext(dev->id);
}

/* Derive an AC_ERR_* mask from a taskfile status byte.  BUSY/DRQ still
 * set means the host state machine is confused; ERR/DF means the
 * device reported an error; otherwise no error. */
static inline unsigned int ac_err_mask(u8 status)
{
	if (status & (ATA_BUSY | ATA_DRQ))
		return AC_ERR_HSM;
	if (status & (ATA_ERR | ATA_DF))
		return AC_ERR_DEV;
	return 0;
}

/* As ac_err_mask(), but never returns 0 - callers use this when an
 * error definitely occurred and need a non-empty mask. */
static inline unsigned int __ac_err_mask(u8 status)
{
	unsigned int mask = ac_err_mask(status);
	if (mask == 0)
		return AC_ERR_OTHER;
	return mask;
}

/* Allocate the port's DMA pad buffer (managed; freed automatically on
 * device teardown).  Returns 0 on success, -ENOMEM on failure. */
static inline int ata_pad_alloc(struct ata_port *ap, struct device *dev)
{
	ap->pad_dma = 0;
	ap->pad = dmam_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ,
				      &ap->pad_dma, GFP_KERNEL);
	return (ap->pad == NULL) ? -ENOMEM : 0;
}

/* Explicitly release the DMA pad buffer allocated by ata_pad_alloc(). */
static inline void ata_pad_free(struct ata_port *ap, struct device *dev)
{
	dmam_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
}

/* The ata_port lives in the Scsi_Host's hostdata area. */
static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
{
	return (struct ata_port *) &host->hostdata[0];
}

#endif /* __LINUX_LIBATA_H__ */