/* libata.h revision baa1e78a834c917984a4659fd282f712c17ee3bf */
1/* 2 * Copyright 2003-2005 Red Hat, Inc. All rights reserved. 3 * Copyright 2003-2005 Jeff Garzik 4 * 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2, or (at your option) 9 * any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; see the file COPYING. If not, write to 18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 19 * 20 * 21 * libata documentation is available via 'make {ps|pdf}docs', 22 * as Documentation/DocBook/libata.* 23 * 24 */ 25 26#ifndef __LINUX_LIBATA_H__ 27#define __LINUX_LIBATA_H__ 28 29#include <linux/delay.h> 30#include <linux/interrupt.h> 31#include <linux/pci.h> 32#include <linux/dma-mapping.h> 33#include <asm/scatterlist.h> 34#include <asm/io.h> 35#include <linux/ata.h> 36#include <linux/workqueue.h> 37#include <scsi/scsi_host.h> 38 39/* 40 * Define if arch has non-standard setup. This is a _PCI_ standard 41 * not a legacy or ISA standard. 
42 */ 43#ifdef CONFIG_ATA_NONSTANDARD 44#include <asm/libata-portmap.h> 45#else 46#include <asm-generic/libata-portmap.h> 47#endif 48 49/* 50 * compile-time options: to be removed as soon as all the drivers are 51 * converted to the new debugging mechanism 52 */ 53#undef ATA_DEBUG /* debugging output */ 54#undef ATA_VERBOSE_DEBUG /* yet more debugging output */ 55#undef ATA_IRQ_TRAP /* define to ack screaming irqs */ 56#undef ATA_NDEBUG /* define to disable quick runtime checks */ 57#define ATA_ENABLE_PATA /* define to enable PATA support in some 58 * low-level drivers */ 59 60 61/* note: prints function name for you */ 62#ifdef ATA_DEBUG 63#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) 64#ifdef ATA_VERBOSE_DEBUG 65#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) 66#else 67#define VPRINTK(fmt, args...) 68#endif /* ATA_VERBOSE_DEBUG */ 69#else 70#define DPRINTK(fmt, args...) 71#define VPRINTK(fmt, args...) 72#endif /* ATA_DEBUG */ 73 74#define BPRINTK(fmt, args...) 
if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) 75 76/* NEW: debug levels */ 77#define HAVE_LIBATA_MSG 1 78 79enum { 80 ATA_MSG_DRV = 0x0001, 81 ATA_MSG_INFO = 0x0002, 82 ATA_MSG_PROBE = 0x0004, 83 ATA_MSG_WARN = 0x0008, 84 ATA_MSG_MALLOC = 0x0010, 85 ATA_MSG_CTL = 0x0020, 86 ATA_MSG_INTR = 0x0040, 87 ATA_MSG_ERR = 0x0080, 88}; 89 90#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV) 91#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO) 92#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE) 93#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN) 94#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC) 95#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL) 96#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR) 97#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR) 98 99static inline u32 ata_msg_init(int dval, int default_msg_enable_bits) 100{ 101 if (dval < 0 || dval >= (sizeof(u32) * 8)) 102 return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */ 103 if (!dval) 104 return 0; 105 return (1 << dval) - 1; 106} 107 108/* defines only for the constants which don't work well as enums */ 109#define ATA_TAG_POISON 0xfafbfcfdU 110 111/* move to PCI layer? 
*/ 112#define PCI_VDEVICE(vendor, device) \ 113 PCI_VENDOR_ID_##vendor, (device), \ 114 PCI_ANY_ID, PCI_ANY_ID, 0, 0 115 116static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) 117{ 118 return &pdev->dev; 119} 120 121enum { 122 /* various global constants */ 123 LIBATA_MAX_PRD = ATA_MAX_PRD / 2, 124 ATA_MAX_PORTS = 8, 125 ATA_DEF_QUEUE = 1, 126 /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */ 127 ATA_MAX_QUEUE = 32, 128 ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1, 129 ATA_MAX_BUS = 2, 130 ATA_DEF_BUSY_WAIT = 10000, 131 ATA_SHORT_PAUSE = (HZ >> 6) + 1, 132 133 ATA_SHT_EMULATED = 1, 134 ATA_SHT_CMD_PER_LUN = 1, 135 ATA_SHT_THIS_ID = -1, 136 ATA_SHT_USE_CLUSTERING = 1, 137 138 /* struct ata_device stuff */ 139 ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */ 140 ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */ 141 ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ when ready for CDB */ 142 ATA_DFLAG_NCQ = (1 << 3), /* device supports NCQ */ 143 ATA_DFLAG_CFG_MASK = (1 << 8) - 1, 144 145 ATA_DFLAG_PIO = (1 << 8), /* device limited to PIO mode */ 146 ATA_DFLAG_NCQ_OFF = (1 << 9), /* device limited to non-NCQ mode */ 147 ATA_DFLAG_SUSPENDED = (1 << 10), /* device suspended */ 148 ATA_DFLAG_INIT_MASK = (1 << 16) - 1, 149 150 ATA_DFLAG_DETACH = (1 << 16), 151 ATA_DFLAG_DETACHED = (1 << 17), 152 153 ATA_DEV_UNKNOWN = 0, /* unknown device */ 154 ATA_DEV_ATA = 1, /* ATA device */ 155 ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */ 156 ATA_DEV_ATAPI = 3, /* ATAPI device */ 157 ATA_DEV_ATAPI_UNSUP = 4, /* ATAPI device (unsupported) */ 158 ATA_DEV_NONE = 5, /* no device */ 159 160 /* struct ata_port flags */ 161 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ 162 /* (doesn't imply presence) */ 163 ATA_FLAG_SATA = (1 << 1), 164 ATA_FLAG_NO_LEGACY = (1 << 2), /* no legacy mode check */ 165 ATA_FLAG_MMIO = (1 << 3), /* use MMIO, not PIO */ 166 ATA_FLAG_SRST = (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. 
*/ 167 ATA_FLAG_SATA_RESET = (1 << 5), /* (obsolete) use COMRESET */ 168 ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */ 169 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ 170 ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */ 171 ATA_FLAG_PIO_POLLING = (1 << 9), /* use polling PIO if LLD 172 * doesn't handle PIO interrupts */ 173 ATA_FLAG_NCQ = (1 << 10), /* host supports NCQ */ 174 ATA_FLAG_HRST_TO_RESUME = (1 << 11), /* hardreset to resume phy */ 175 ATA_FLAG_SKIP_D2H_BSY = (1 << 12), /* can't wait for the first D2H 176 * Register FIS clearing BSY */ 177 ATA_FLAG_DEBUGMSG = (1 << 13), 178 179 /* The following flag belongs to ap->pflags but is kept in 180 * ap->flags because it's referenced in many LLDs and will be 181 * removed in not-too-distant future. 182 */ 183 ATA_FLAG_DISABLED = (1 << 23), /* port is disabled, ignore it */ 184 185 /* bits 24:31 of ap->flags are reserved for LLD specific flags */ 186 187 /* struct ata_port pflags */ 188 ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */ 189 ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */ 190 ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */ 191 ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */ 192 ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */ 193 ATA_PFLAG_UNLOADING = (1 << 5), /* module is unloading */ 194 ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */ 195 196 ATA_PFLAG_FLUSH_PORT_TASK = (1 << 16), /* flush port task */ 197 ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */ 198 ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ 199 200 /* struct ata_queued_cmd flags */ 201 ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */ 202 ATA_QCFLAG_SG = (1 << 1), /* have s/g table? 
*/ 203 ATA_QCFLAG_SINGLE = (1 << 2), /* no s/g, just a single buffer */ 204 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE, 205 ATA_QCFLAG_IO = (1 << 3), /* standard IO command */ 206 ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */ 207 208 ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */ 209 ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */ 210 ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */ 211 212 /* host set flags */ 213 ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host only */ 214 215 /* various lengths of time */ 216 ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */ 217 ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */ 218 ATA_TMOUT_INTERNAL = 30 * HZ, 219 ATA_TMOUT_INTERNAL_QUICK = 5 * HZ, 220 221 /* ATA bus states */ 222 BUS_UNKNOWN = 0, 223 BUS_DMA = 1, 224 BUS_IDLE = 2, 225 BUS_NOINTR = 3, 226 BUS_NODATA = 4, 227 BUS_TIMER = 5, 228 BUS_PIO = 6, 229 BUS_EDD = 7, 230 BUS_IDENTIFY = 8, 231 BUS_PACKET = 9, 232 233 /* SATA port states */ 234 PORT_UNKNOWN = 0, 235 PORT_ENABLED = 1, 236 PORT_DISABLED = 2, 237 238 /* encoding various smaller bitmaps into a single 239 * unsigned int bitmap 240 */ 241 ATA_BITS_PIO = 7, 242 ATA_BITS_MWDMA = 5, 243 ATA_BITS_UDMA = 8, 244 245 ATA_SHIFT_PIO = 0, 246 ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_BITS_PIO, 247 ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_BITS_MWDMA, 248 249 ATA_MASK_PIO = ((1 << ATA_BITS_PIO) - 1) << ATA_SHIFT_PIO, 250 ATA_MASK_MWDMA = ((1 << ATA_BITS_MWDMA) - 1) << ATA_SHIFT_MWDMA, 251 ATA_MASK_UDMA = ((1 << ATA_BITS_UDMA) - 1) << ATA_SHIFT_UDMA, 252 253 /* size of buffer to pad xfers ending on unaligned boundaries */ 254 ATA_DMA_PAD_SZ = 4, 255 ATA_DMA_PAD_BUF_SZ = ATA_DMA_PAD_SZ * ATA_MAX_QUEUE, 256 257 /* masks for port functions */ 258 ATA_PORT_PRIMARY = (1 << 0), 259 ATA_PORT_SECONDARY = (1 << 1), 260 261 /* ering size */ 262 ATA_ERING_SIZE = 32, 263 264 /* desc_len for ata_eh_info and context */ 265 ATA_EH_DESC_LEN = 80, 266 267 /* 
reset / recovery action types */ 268 ATA_EH_REVALIDATE = (1 << 0), 269 ATA_EH_SOFTRESET = (1 << 1), 270 ATA_EH_HARDRESET = (1 << 2), 271 ATA_EH_SUSPEND = (1 << 3), 272 ATA_EH_RESUME = (1 << 4), 273 ATA_EH_PM_FREEZE = (1 << 5), 274 275 ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, 276 ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_SUSPEND | 277 ATA_EH_RESUME | ATA_EH_PM_FREEZE, 278 279 /* ata_eh_info->flags */ 280 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ 281 ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */ 282 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ 283 ATA_EHI_QUIET = (1 << 3), /* be quiet */ 284 285 ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */ 286 ATA_EHI_PRINTINFO = (1 << 17), /* print configuration info */ 287 ATA_EHI_SETMODE = (1 << 18), /* configure transfer mode */ 288 ATA_EHI_POST_SETMODE = (1 << 19), /* revaildating after setmode */ 289 290 ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK, 291 292 /* max repeat if error condition is still set after ->error_handler */ 293 ATA_EH_MAX_REPEAT = 5, 294 295 /* how hard are we gonna try to probe/recover devices */ 296 ATA_PROBE_MAX_TRIES = 3, 297 ATA_EH_RESET_TRIES = 3, 298 ATA_EH_DEV_TRIES = 3, 299 300 /* Drive spinup time (time from power-on to the first D2H FIS) 301 * in msecs - 8s currently. Failing to get ready in this time 302 * isn't critical. It will result in reset failure for 303 * controllers which can't wait for the first D2H FIS. libata 304 * will retry, so it just has to be long enough to spin up 305 * most devices. 306 */ 307 ATA_SPINUP_WAIT = 8000, 308 309 /* Horkage types. 
May be set by libata or controller on drives 310 (some horkage may be drive/controller pair dependant */ 311 312 ATA_HORKAGE_DIAGNOSTIC = (1 << 0), /* Failed boot diag */ 313 ATA_HORKAGE_NODMA = (1 << 1), /* DMA problems */ 314 ATA_HORKAGE_NONCQ = (1 << 2), /* Don't use NCQ */ 315}; 316 317enum hsm_task_states { 318 HSM_ST_UNKNOWN, /* state unknown */ 319 HSM_ST_IDLE, /* no command on going */ 320 HSM_ST, /* (waiting the device to) transfer data */ 321 HSM_ST_LAST, /* (waiting the device to) complete command */ 322 HSM_ST_ERR, /* error */ 323 HSM_ST_FIRST, /* (waiting the device to) 324 write CDB or first data block */ 325}; 326 327enum ata_completion_errors { 328 AC_ERR_DEV = (1 << 0), /* device reported error */ 329 AC_ERR_HSM = (1 << 1), /* host state machine violation */ 330 AC_ERR_TIMEOUT = (1 << 2), /* timeout */ 331 AC_ERR_MEDIA = (1 << 3), /* media error */ 332 AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */ 333 AC_ERR_HOST_BUS = (1 << 5), /* host bus error */ 334 AC_ERR_SYSTEM = (1 << 6), /* system error */ 335 AC_ERR_INVALID = (1 << 7), /* invalid argument */ 336 AC_ERR_OTHER = (1 << 8), /* unknown */ 337}; 338 339/* forward declarations */ 340struct scsi_device; 341struct ata_port_operations; 342struct ata_port; 343struct ata_queued_cmd; 344 345/* typedefs */ 346typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc); 347typedef int (*ata_prereset_fn_t)(struct ata_port *ap); 348typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes); 349typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes); 350 351struct ata_ioports { 352 unsigned long cmd_addr; 353 unsigned long data_addr; 354 unsigned long error_addr; 355 unsigned long feature_addr; 356 unsigned long nsect_addr; 357 unsigned long lbal_addr; 358 unsigned long lbam_addr; 359 unsigned long lbah_addr; 360 unsigned long device_addr; 361 unsigned long status_addr; 362 unsigned long command_addr; 363 unsigned long altstatus_addr; 364 unsigned long ctl_addr; 365 
unsigned long bmdma_addr; 366 unsigned long scr_addr; 367}; 368 369struct ata_probe_ent { 370 struct list_head node; 371 struct device *dev; 372 const struct ata_port_operations *port_ops; 373 struct scsi_host_template *sht; 374 struct ata_ioports port[ATA_MAX_PORTS]; 375 unsigned int n_ports; 376 unsigned int dummy_port_mask; 377 unsigned int pio_mask; 378 unsigned int mwdma_mask; 379 unsigned int udma_mask; 380 unsigned long irq; 381 unsigned long irq2; 382 unsigned int irq_flags; 383 unsigned long port_flags; 384 unsigned long _host_flags; 385 void __iomem *mmio_base; 386 void *private_data; 387 388 /* port_info for the secondary port. Together with irq2, it's 389 * used to implement non-uniform secondary port. Currently, 390 * the only user is ata_piix combined mode. This workaround 391 * will be removed together with ata_probe_ent when init model 392 * is updated. 393 */ 394 const struct ata_port_info *pinfo2; 395}; 396 397struct ata_host { 398 spinlock_t lock; 399 struct device *dev; 400 unsigned long irq; 401 unsigned long irq2; 402 void __iomem *mmio_base; 403 unsigned int n_ports; 404 void *private_data; 405 const struct ata_port_operations *ops; 406 unsigned long flags; 407 int simplex_claimed; /* Keep seperate in case we 408 ever need to do this locked */ 409 struct ata_port *ports[0]; 410}; 411 412struct ata_queued_cmd { 413 struct ata_port *ap; 414 struct ata_device *dev; 415 416 struct scsi_cmnd *scsicmd; 417 void (*scsidone)(struct scsi_cmnd *); 418 419 struct ata_taskfile tf; 420 u8 cdb[ATAPI_CDB_LEN]; 421 422 unsigned long flags; /* ATA_QCFLAG_xxx */ 423 unsigned int tag; 424 unsigned int n_elem; 425 unsigned int orig_n_elem; 426 427 int dma_dir; 428 429 unsigned int pad_len; 430 431 unsigned int nsect; 432 unsigned int cursect; 433 434 unsigned int nbytes; 435 unsigned int curbytes; 436 437 unsigned int cursg; 438 unsigned int cursg_ofs; 439 440 struct scatterlist sgent; 441 struct scatterlist pad_sgent; 442 void *buf_virt; 443 444 /* DO NOT 
iterate over __sg manually, use ata_for_each_sg() */ 445 struct scatterlist *__sg; 446 447 unsigned int err_mask; 448 struct ata_taskfile result_tf; 449 ata_qc_cb_t complete_fn; 450 451 void *private_data; 452}; 453 454struct ata_port_stats { 455 unsigned long unhandled_irq; 456 unsigned long idle_irq; 457 unsigned long rw_reqbuf; 458}; 459 460struct ata_ering_entry { 461 int is_io; 462 unsigned int err_mask; 463 u64 timestamp; 464}; 465 466struct ata_ering { 467 int cursor; 468 struct ata_ering_entry ring[ATA_ERING_SIZE]; 469}; 470 471struct ata_device { 472 struct ata_port *ap; 473 unsigned int devno; /* 0 or 1 */ 474 unsigned long flags; /* ATA_DFLAG_xxx */ 475 struct scsi_device *sdev; /* attached SCSI device */ 476 /* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */ 477 u64 n_sectors; /* size of device, if ATA */ 478 unsigned int class; /* ATA_DEV_xxx */ 479 u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ 480 u8 pio_mode; 481 u8 dma_mode; 482 u8 xfer_mode; 483 unsigned int xfer_shift; /* ATA_SHIFT_xxx */ 484 485 unsigned int multi_count; /* sectors count for 486 READ/WRITE MULTIPLE */ 487 unsigned int max_sectors; /* per-device max sectors */ 488 unsigned int cdb_len; 489 490 /* per-dev xfer mask */ 491 unsigned int pio_mask; 492 unsigned int mwdma_mask; 493 unsigned int udma_mask; 494 495 /* for CHS addressing */ 496 u16 cylinders; /* Number of cylinders */ 497 u16 heads; /* Number of heads */ 498 u16 sectors; /* Number of sectors per track */ 499 500 /* error history */ 501 struct ata_ering ering; 502 unsigned int horkage; /* List of broken features */ 503}; 504 505/* Offset into struct ata_device. Fields above it are maintained 506 * acress device init. Fields below are zeroed. 
507 */ 508#define ATA_DEVICE_CLEAR_OFFSET offsetof(struct ata_device, n_sectors) 509 510struct ata_eh_info { 511 struct ata_device *dev; /* offending device */ 512 u32 serror; /* SError from LLDD */ 513 unsigned int err_mask; /* port-wide err_mask */ 514 unsigned int action; /* ATA_EH_* action mask */ 515 unsigned int dev_action[ATA_MAX_DEVICES]; /* dev EH action */ 516 unsigned int flags; /* ATA_EHI_* flags */ 517 518 unsigned long hotplug_timestamp; 519 unsigned int probe_mask; 520 521 char desc[ATA_EH_DESC_LEN]; 522 int desc_len; 523}; 524 525struct ata_eh_context { 526 struct ata_eh_info i; 527 int tries[ATA_MAX_DEVICES]; 528 unsigned int classes[ATA_MAX_DEVICES]; 529 unsigned int did_probe_mask; 530}; 531 532struct ata_port { 533 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ 534 const struct ata_port_operations *ops; 535 spinlock_t *lock; 536 unsigned long flags; /* ATA_FLAG_xxx */ 537 unsigned int pflags; /* ATA_PFLAG_xxx */ 538 unsigned int id; /* unique id req'd by scsi midlyr */ 539 unsigned int port_no; /* unique port #; from zero */ 540 541 struct ata_prd *prd; /* our SG list */ 542 dma_addr_t prd_dma; /* and its DMA mapping */ 543 544 void *pad; /* array of DMA pad buffers */ 545 dma_addr_t pad_dma; 546 547 struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ 548 549 u8 ctl; /* cache of ATA control register */ 550 u8 last_ctl; /* Cache last written value */ 551 unsigned int pio_mask; 552 unsigned int mwdma_mask; 553 unsigned int udma_mask; 554 unsigned int cbl; /* cable type; ATA_CBL_xxx */ 555 unsigned int hw_sata_spd_limit; 556 unsigned int sata_spd_limit; /* SATA PHY speed limit */ 557 558 /* record runtime error info, protected by host lock */ 559 struct ata_eh_info eh_info; 560 /* EH context owned by EH */ 561 struct ata_eh_context eh_context; 562 563 struct ata_device device[ATA_MAX_DEVICES]; 564 565 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE]; 566 unsigned long qc_allocated; 567 unsigned int qc_active; 568 569 unsigned 
int active_tag; 570 u32 sactive; 571 572 struct ata_port_stats stats; 573 struct ata_host *host; 574 struct device *dev; 575 576 struct work_struct port_task; 577 struct work_struct hotplug_task; 578 struct work_struct scsi_rescan_task; 579 580 unsigned int hsm_task_state; 581 582 u32 msg_enable; 583 struct list_head eh_done_q; 584 wait_queue_head_t eh_wait_q; 585 586 pm_message_t pm_mesg; 587 int *pm_result; 588 589 void *private_data; 590 591 u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */ 592}; 593 594struct ata_port_operations { 595 void (*port_disable) (struct ata_port *); 596 597 void (*dev_config) (struct ata_port *, struct ata_device *); 598 599 void (*set_piomode) (struct ata_port *, struct ata_device *); 600 void (*set_dmamode) (struct ata_port *, struct ata_device *); 601 unsigned long (*mode_filter) (const struct ata_port *, struct ata_device *, unsigned long); 602 603 void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf); 604 void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf); 605 606 void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf); 607 u8 (*check_status)(struct ata_port *ap); 608 u8 (*check_altstatus)(struct ata_port *ap); 609 void (*dev_select)(struct ata_port *ap, unsigned int device); 610 611 void (*phy_reset) (struct ata_port *ap); /* obsolete */ 612 void (*set_mode) (struct ata_port *ap); 613 614 void (*post_set_mode) (struct ata_port *ap); 615 616 int (*check_atapi_dma) (struct ata_queued_cmd *qc); 617 618 void (*bmdma_setup) (struct ata_queued_cmd *qc); 619 void (*bmdma_start) (struct ata_queued_cmd *qc); 620 621 void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int); 622 623 void (*qc_prep) (struct ata_queued_cmd *qc); 624 unsigned int (*qc_issue) (struct ata_queued_cmd *qc); 625 626 /* Error handlers. ->error_handler overrides ->eng_timeout and 627 * indicates that new-style EH is in place. 
628 */ 629 void (*eng_timeout) (struct ata_port *ap); /* obsolete */ 630 631 void (*freeze) (struct ata_port *ap); 632 void (*thaw) (struct ata_port *ap); 633 void (*error_handler) (struct ata_port *ap); 634 void (*post_internal_cmd) (struct ata_queued_cmd *qc); 635 636 irq_handler_t irq_handler; 637 void (*irq_clear) (struct ata_port *); 638 639 u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg); 640 void (*scr_write) (struct ata_port *ap, unsigned int sc_reg, 641 u32 val); 642 643 int (*port_suspend) (struct ata_port *ap, pm_message_t mesg); 644 int (*port_resume) (struct ata_port *ap); 645 646 int (*port_start) (struct ata_port *ap); 647 void (*port_stop) (struct ata_port *ap); 648 649 void (*host_stop) (struct ata_host *host); 650 651 void (*bmdma_stop) (struct ata_queued_cmd *qc); 652 u8 (*bmdma_status) (struct ata_port *ap); 653}; 654 655struct ata_port_info { 656 struct scsi_host_template *sht; 657 unsigned long flags; 658 unsigned long pio_mask; 659 unsigned long mwdma_mask; 660 unsigned long udma_mask; 661 const struct ata_port_operations *port_ops; 662 void *private_data; 663}; 664 665struct ata_timing { 666 unsigned short mode; /* ATA mode */ 667 unsigned short setup; /* t1 */ 668 unsigned short act8b; /* t2 for 8-bit I/O */ 669 unsigned short rec8b; /* t2i for 8-bit I/O */ 670 unsigned short cyc8b; /* t0 for 8-bit I/O */ 671 unsigned short active; /* t2 or tD */ 672 unsigned short recover; /* t2i or tK */ 673 unsigned short cycle; /* t0 */ 674 unsigned short udma; /* t2CYCTYP/2 */ 675}; 676 677#define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin) 678 679extern const unsigned long sata_deb_timing_normal[]; 680extern const unsigned long sata_deb_timing_hotplug[]; 681extern const unsigned long sata_deb_timing_long[]; 682 683extern const struct ata_port_operations ata_dummy_port_ops; 684 685static inline const unsigned long * 686sata_ehc_deb_timing(struct ata_eh_context *ehc) 687{ 688 if (ehc->i.flags & ATA_EHI_HOTPLUGGED) 689 return 
sata_deb_timing_hotplug; 690 else 691 return sata_deb_timing_normal; 692} 693 694static inline int ata_port_is_dummy(struct ata_port *ap) 695{ 696 return ap->ops == &ata_dummy_port_ops; 697} 698 699extern void ata_port_probe(struct ata_port *); 700extern void __sata_phy_reset(struct ata_port *ap); 701extern void sata_phy_reset(struct ata_port *ap); 702extern void ata_bus_reset(struct ata_port *ap); 703extern int sata_set_spd(struct ata_port *ap); 704extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param); 705extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param); 706extern int ata_std_prereset(struct ata_port *ap); 707extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes); 708extern int sata_port_hardreset(struct ata_port *ap, 709 const unsigned long *timing); 710extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class); 711extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes); 712extern void ata_port_disable(struct ata_port *); 713extern void ata_std_ports(struct ata_ioports *ioaddr); 714#ifdef CONFIG_PCI 715extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, 716 unsigned int n_ports); 717extern void ata_pci_remove_one (struct pci_dev *pdev); 718extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg); 719extern void ata_pci_device_do_resume(struct pci_dev *pdev); 720extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); 721extern int ata_pci_device_resume(struct pci_dev *pdev); 722extern int ata_pci_clear_simplex(struct pci_dev *pdev); 723#endif /* CONFIG_PCI */ 724extern int ata_device_add(const struct ata_probe_ent *ent); 725extern void ata_port_detach(struct ata_port *ap); 726extern void ata_host_init(struct ata_host *, struct device *, 727 unsigned long, const struct ata_port_operations *); 728extern void ata_host_remove(struct ata_host *host); 729extern int ata_scsi_detect(struct 
scsi_host_template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
extern int ata_scsi_release(struct Scsi_Host *host);
extern void ata_sas_port_destroy(struct ata_port *);
extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
					   struct ata_port_info *, struct Scsi_Host *);
extern int ata_sas_port_init(struct ata_port *);
extern int ata_sas_port_start(struct ata_port *ap);
extern void ata_sas_port_stop(struct ata_port *ap);
extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
			    struct ata_port *ap);
extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
extern int sata_scr_valid(struct ata_port *ap);
extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
extern int ata_port_online(struct ata_port *ap);
extern int ata_port_offline(struct ata_port *ap);
extern int ata_scsi_device_resume(struct scsi_device *);
extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t mesg);
extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
extern void ata_host_resume(struct ata_host *host);
extern int ata_ratelimit(void);
extern int ata_busy_sleep(struct ata_port *ap,
			  unsigned long timeout_pat, unsigned long timeout);
extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
				void *data, unsigned long delay);
extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
			     unsigned long interval_msec,
			     unsigned long timeout_msec);

/*
 * Default driver ops implementations
 */
extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp);
extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device);
extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
extern u8 ata_check_status(struct ata_port *ap);
extern u8 ata_altstatus(struct ata_port *ap);
extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
extern int ata_port_start (struct ata_port *ap);
extern void ata_port_stop (struct ata_port *ap);
extern void ata_host_stop (struct ata_host *host);
extern irqreturn_t ata_interrupt (int irq, void *dev_instance);
extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
			       unsigned int buflen, int write_data);
extern void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
			      unsigned int buflen, int write_data);
extern void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
				    unsigned int buflen, int write_data);
extern void ata_qc_prep(struct ata_queued_cmd *qc);
extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
			    unsigned int buflen);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
			unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
extern void ata_id_string(const u16 *id, unsigned char *s,
			  unsigned int ofs, unsigned int len);
extern void ata_id_c_string(const u16 *id, unsigned char *s,
			    unsigned int ofs, unsigned int len);
extern unsigned long ata_device_blacklisted(const struct ata_device *dev);
extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
extern void ata_bmdma_start (struct ata_queued_cmd *qc);
extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
extern u8 ata_bmdma_status(struct ata_port *ap);
extern void ata_bmdma_irq_clear(struct ata_port *ap);
extern void ata_bmdma_freeze(struct ata_port *ap);
extern void ata_bmdma_thaw(struct ata_port *ap);
extern void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
			       ata_reset_fn_t softreset,
			       ata_reset_fn_t hardreset,
			       ata_postreset_fn_t postreset);
extern void ata_bmdma_error_handler(struct ata_port *ap);
extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
extern int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
			u8 status, int in_wq);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
				    void (*finish_qc)(struct ata_queued_cmd *));
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
			      void (*done)(struct scsi_cmnd *));
extern int ata_std_bios_param(struct scsi_device *sdev,
			      struct block_device *bdev,
			      sector_t capacity, int geom[]);
extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
				       int queue_depth);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);

/*
 * Timing helpers
 */

extern unsigned int ata_pio_need_iordy(const struct ata_device *);
extern int ata_timing_compute(struct ata_device *, unsigned short,
			      struct ata_timing *, int, int);
extern void ata_timing_merge(const struct ata_timing *,
			     const struct ata_timing *, struct ata_timing *,
			     unsigned int);

/* bitmask selecting which fields of a struct ata_timing to compute/merge */
enum {
	ATA_TIMING_SETUP	= (1 << 0),
	ATA_TIMING_ACT8B	= (1 << 1),
	ATA_TIMING_REC8B	= (1 << 2),
	ATA_TIMING_CYC8B	= (1 << 3),
	/* all 8-bit (register/taskfile) cycle components except setup */
	ATA_TIMING_8BIT		= ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
				  ATA_TIMING_CYC8B,
	ATA_TIMING_ACTIVE	= (1 << 4),
	ATA_TIMING_RECOVER	= (1 << 5),
	ATA_TIMING_CYCLE	= (1 << 6),
	ATA_TIMING_UDMA		= (1 << 7),
	ATA_TIMING_ALL		= ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
				  ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
				  ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
				  ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
};


#ifdef CONFIG_PCI
/* describes one PCI config-space test: read @width bytes at @reg,
 * mask with @mask and compare against @val (see pci_test_config_bits()) */
struct pci_bits {
	unsigned int		reg;	/* PCI config register to read */
	unsigned int		width;	/* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
	unsigned long		mask;
	unsigned long		val;
};

extern void ata_pci_host_stop (struct ata_host *host);
extern struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
#endif /* CONFIG_PCI */

/*
 * EH (error handling)
 */
extern void ata_eng_timeout(struct ata_port *ap);

extern void ata_port_schedule_eh(struct ata_port *ap);
extern int ata_port_abort(struct ata_port *ap);
extern int ata_port_freeze(struct ata_port *ap);

extern void ata_eh_freeze_port(struct ata_port *ap);
extern void ata_eh_thaw_port(struct ata_port *ap);

extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);

extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
		      ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		      ata_postreset_fn_t postreset);

/*
 * printk helpers: prefix messages with the port ("ataN:") or
 * device ("ataN.DD:") identity.  @lv is a KERN_* level string and is
 * concatenated with the format by string-literal pasting.
 */
#define ata_port_printk(ap, lv, fmt, args...) \
	printk(lv"ata%u: "fmt, (ap)->id , ##args)

#define ata_dev_printk(dev, lv, fmt, args...) \
	printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)

/*
 * ata_eh_info helpers
 */
/* append formatted text to the EH description buffer; scnprintf() keeps
 * desc_len within ATA_EH_DESC_LEN even when the buffer fills up */
#define ata_ehi_push_desc(ehi, fmt, args...) do { \
	(ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
				     ATA_EH_DESC_LEN - (ehi)->desc_len, \
				     fmt , ##args); \
} while (0)

#define ata_ehi_clear_desc(ehi) do { \
	(ehi)->desc[0] = '\0'; \
	(ehi)->desc_len = 0; \
} while (0)

/* record a hotplug event in @ehi: set hotplug/resume flags, timestamp it,
 * request a softreset and mark every possible device for (re)probing.
 * Idempotent - a second call while ATA_EHI_HOTPLUGGED is set is a no-op,
 * preserving the original timestamp. */
static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi)
{
	if (ehi->flags & ATA_EHI_HOTPLUGGED)
		return;

	ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK;
	ehi->hotplug_timestamp = jiffies;

	ehi->action |= ATA_EH_SOFTRESET;
	ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
}

/* as __ata_ehi_hotplugged() but additionally flags an ATA bus error */
static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
{
	__ata_ehi_hotplugged(ehi);
	ehi->err_mask |= AC_ERR_ATA_BUS;
}

/*
 * qc helpers: iterate a qc's scatterlist, transparently including the
 * DMA pad entry (qc->pad_sgent) as a final element when qc->pad_len != 0.
 */
static inline int
ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
	if (sg == &qc->pad_sgent)
		return 1;
	if (qc->pad_len)
		return 0;	/* pad entry still follows */
	if (((sg - qc->__sg) + 1) == qc->n_elem)
		return 1;
	return 0;
}

static inline struct scatterlist *
ata_qc_first_sg(struct ata_queued_cmd *qc)
{
	if (qc->n_elem)
		return qc->__sg;
	if (qc->pad_len)
		return &qc->pad_sgent;	/* pad-only transfer */
	return NULL;
}

static inline struct scatterlist *
ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
	if (sg == &qc->pad_sgent)
		return NULL;		/* pad entry is always last */
	if (++sg - qc->__sg < qc->n_elem)
		return sg;
	if (qc->pad_len)
		return &qc->pad_sgent;
	return NULL;
}

#define ata_for_each_sg(sg, qc) \
	for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))

static inline unsigned int ata_tag_valid(unsigned int tag)
{
	return (tag < ATA_MAX_QUEUE) ? 1 : 0;
}

/* the last tag is reserved for internal (non-SCSI-originated) commands */
static inline unsigned int ata_tag_internal(unsigned int tag)
{
	return tag == ATA_MAX_QUEUE - 1;
}

/*
 * device helpers: classify a device state from its ATA_DEV_* class code.
 * A class is either enabled (usable ATA/ATAPI device), disabled
 * (detected but unsupported), or absent (anything else).
 */
static inline unsigned int ata_class_enabled(unsigned int class)
{
	return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
}

static inline unsigned int ata_class_disabled(unsigned int class)
{
	return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
}

static inline unsigned int ata_class_absent(unsigned int class)
{
	return !ata_class_enabled(class) && !ata_class_disabled(class);
}

static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
{
	return ata_class_enabled(dev->class);
}

static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
{
	return ata_class_disabled(dev->class);
}

static inline unsigned int ata_dev_absent(const struct ata_device *dev)
{
	return ata_class_absent(dev->class);
}

/* enabled AND not suspended - i.e. ready to accept commands */
static inline unsigned int ata_dev_ready(const struct ata_device *dev)
{
	return ata_dev_enabled(dev) && !(dev->flags & ATA_DFLAG_SUSPENDED);
}

/*
 * port helpers
 */
/* number of device slots on @ap: 2 when a slave is possible, else 1 */
static inline int ata_port_max_devices(const struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_SLAVE_POSS)
		return 2;
	return 1;
}


/* read the port status register via the driver's check_status op */
static inline u8 ata_chk_status(struct ata_port *ap)
{
	return ap->ops->check_status(ap);
}


/**
 *	ata_pause - Flush writes and pause 400 nanoseconds.
 *	@ap: Port to wait for.
 *
 *	Reading the alternate status register flushes posted writes
 *	before the mandated 400ns delay.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline void ata_pause(struct ata_port *ap)
{
	ata_altstatus(ap);
	ndelay(400);
}


/**
 *	ata_busy_wait - Wait for a port status register
 *	@ap: Port to wait for.
 *	@bits: status bits to wait on (loop ends once all are clear)
 *	@max: maximum number of 10us polling iterations
 *
 *	Waits up to max*10 microseconds for the selected bits in the port's
 *	status register to be cleared.
 *	Returns final value of status register.  A status of 0xff
 *	(floating bus / no device) also terminates the wait.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits,
			       unsigned int max)
{
	u8 status;

	do {
		udelay(10);
		status = ata_chk_status(ap);
		max--;
	} while (status != 0xff && (status & bits) && (max > 0));

	return status;
}


/**
 *	ata_wait_idle - Wait for a port to be idle.
 *	@ap: Port to wait for.
 *
 *	Waits up to 10ms for port's BUSY and DRQ signals to clear.
 *	Returns final value of status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline u8 ata_wait_idle(struct ata_port *ap)
{
	u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);

	/* still busy (and not a floating 0xff bus): warn but return anyway */
	if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) {
		unsigned long l = ap->ioaddr.status_addr;
		if (ata_msg_warn(ap))
			printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
				status, l);
	}

	return status;
}

/* mark the qc's taskfile to run with device interrupts disabled (polled) */
static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
{
	qc->tf.ctl |= ATA_NIEN;
}

/* tag -> qc slot lookup, no liveness check; NULL for an invalid tag */
static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
						       unsigned int tag)
{
	if (likely(ata_tag_valid(tag)))
		return &ap->qcmd[tag];
	return NULL;
}

/* as __ata_qc_from_tag(), but when the port has an error handler only
 * return qcs that are active and have not been marked failed */
static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
						     unsigned int tag)
{
	struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

	if (unlikely(!qc) || !ap->ops->error_handler)
		return qc;

	if ((qc->flags & (ATA_QCFLAG_ACTIVE |
			  ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
		return qc;

	return NULL;
}

/* zero @tf and preload the port's ctl value and the device-select bit
 * (ATA_DEV1 selects the slave device) */
static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
{
	memset(tf, 0, sizeof(*tf));

	tf->ctl = dev->ap->ctl;
	if (dev->devno == 0)
		tf->device = ATA_DEVICE_OBS;
	else
		tf->device = ATA_DEVICE_OBS | ATA_DEV1;
}

/* reset a qc's per-command state for reuse; qc->dev is kept */
static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
{
	qc->__sg = NULL;
	qc->flags = 0;
	qc->cursect = qc->cursg = qc->cursg_ofs = 0;
	qc->nsect = 0;
	qc->nbytes = qc->curbytes = 0;
	qc->err_mask = 0;

	ata_tf_init(qc->dev, &qc->tf);

	/* init result_tf such that it indicates normal completion */
	qc->result_tf.command = ATA_DRDY;
	qc->result_tf.feature = 0;
}

/**
 *	ata_irq_ack - Acknowledge a device interrupt.
 *	@ap: Port on which interrupts are enabled.
 *	@chk_drq: if non-zero, also wait for DRQ to clear, not just BUSY
 *
 *	Wait up to 10 ms for legacy IDE device to become idle (BUSY
 *	or BUSY+DRQ clear).  Obtain dma status and port status from
 *	device.  Clear the interrupt.  Return port status.
 *
 *	LOCKING:
 */

static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
{
	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
	u8 host_stat, post_stat, status;

	status = ata_busy_wait(ap, bits, 1000);
	if (status & bits)
		if (ata_msg_err(ap))
			printk(KERN_ERR "abnormal status 0x%X\n", status);

	/* get controller status; clear intr, err bits */
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
		host_stat = readb(mmio + ATA_DMA_STATUS);
		writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
		       mmio + ATA_DMA_STATUS);

		post_stat = readb(mmio + ATA_DMA_STATUS);
	} else {
		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
		outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
		     ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);

		post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
	}

	if (ata_msg_intr(ap))
		printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
			__FUNCTION__,
			host_stat, post_stat, status);

	return status;
}

/* non-zero if the device's IDENTIFY data advertises any cache-flush
 * capability (write cache enabled, FLUSH CACHE, or FLUSH CACHE EXT) */
static inline int ata_try_flush_cache(const struct ata_device *dev)
{
	return ata_id_wcache_enabled(dev->id) ||
	       ata_id_has_flush(dev->id) ||
	       ata_id_has_flush_ext(dev->id);
}

/* map a taskfile status byte to an AC_ERR_* mask: BUSY/DRQ set means the
 * host state machine is out of step; ERR/DF indicate a device error */
static inline unsigned int ac_err_mask(u8 status)
{
	if (status & (ATA_BUSY | ATA_DRQ))
		return AC_ERR_HSM;
	if (status & (ATA_ERR | ATA_DF))
		return AC_ERR_DEV;
	return 0;
}

/* as ac_err_mask() but never returns 0 - unclassified failures
 * become AC_ERR_OTHER */
static inline unsigned int __ac_err_mask(u8 status)
{
	unsigned int mask = ac_err_mask(status);
	if (mask == 0)
		return AC_ERR_OTHER;
	return mask;
}

/* allocate the port's coherent DMA pad buffer; returns 0 or -ENOMEM */
static inline int ata_pad_alloc(struct ata_port *ap, struct device *dev)
{
	ap->pad_dma = 0;
	ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ,
				     &ap->pad_dma, GFP_KERNEL);
	return (ap->pad == NULL) ? -ENOMEM : 0;
}

static inline void ata_pad_free(struct ata_port *ap, struct device *dev)
{
	dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
}

/* the ata_port lives in the SCSI host's hostdata area */
static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
{
	return (struct ata_port *) &host->hostdata[0];
}

#endif /* __LINUX_LIBATA_H__ */