/* sata_sil.c revision 8cf32ac6578a70025be1103466da9d1d6141429e */
1/* 2 * sata_sil.c - Silicon Image SATA 3 * 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 5 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * on emails. 7 * 8 * Copyright 2003-2005 Red Hat, Inc. 9 * Copyright 2003 Benjamin Herrenschmidt 10 * 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2, or (at your option) 15 * any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License 23 * along with this program; see the file COPYING. If not, write to 24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 25 * 26 * 27 * libata documentation is available via 'make {ps|pdf}docs', 28 * as Documentation/DocBook/libata.* 29 * 30 * Documentation for SiI 3112: 31 * http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2 32 * 33 * Other errata and documentation available under NDA. 
34 * 35 */ 36 37#include <linux/kernel.h> 38#include <linux/module.h> 39#include <linux/pci.h> 40#include <linux/init.h> 41#include <linux/blkdev.h> 42#include <linux/delay.h> 43#include <linux/interrupt.h> 44#include <linux/device.h> 45#include <scsi/scsi_host.h> 46#include <linux/libata.h> 47 48#define DRV_NAME "sata_sil" 49#define DRV_VERSION "2.3" 50 51enum { 52 SIL_MMIO_BAR = 5, 53 54 /* 55 * host flags 56 */ 57 SIL_FLAG_NO_SATA_IRQ = (1 << 28), 58 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29), 59 SIL_FLAG_MOD15WRITE = (1 << 30), 60 61 SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 62 ATA_FLAG_MMIO, 63 SIL_DFL_LINK_FLAGS = ATA_LFLAG_HRST_TO_RESUME, 64 65 /* 66 * Controller IDs 67 */ 68 sil_3112 = 0, 69 sil_3112_no_sata_irq = 1, 70 sil_3512 = 2, 71 sil_3114 = 3, 72 73 /* 74 * Register offsets 75 */ 76 SIL_SYSCFG = 0x48, 77 78 /* 79 * Register bits 80 */ 81 /* SYSCFG */ 82 SIL_MASK_IDE0_INT = (1 << 22), 83 SIL_MASK_IDE1_INT = (1 << 23), 84 SIL_MASK_IDE2_INT = (1 << 24), 85 SIL_MASK_IDE3_INT = (1 << 25), 86 SIL_MASK_2PORT = SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT, 87 SIL_MASK_4PORT = SIL_MASK_2PORT | 88 SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT, 89 90 /* BMDMA/BMDMA2 */ 91 SIL_INTR_STEERING = (1 << 1), 92 93 SIL_DMA_ENABLE = (1 << 0), /* DMA run switch */ 94 SIL_DMA_RDWR = (1 << 3), /* DMA Rd-Wr */ 95 SIL_DMA_SATA_IRQ = (1 << 4), /* OR of all SATA IRQs */ 96 SIL_DMA_ACTIVE = (1 << 16), /* DMA running */ 97 SIL_DMA_ERROR = (1 << 17), /* PCI bus error */ 98 SIL_DMA_COMPLETE = (1 << 18), /* cmd complete / IRQ pending */ 99 SIL_DMA_N_SATA_IRQ = (1 << 6), /* SATA_IRQ for the next channel */ 100 SIL_DMA_N_ACTIVE = (1 << 24), /* ACTIVE for the next channel */ 101 SIL_DMA_N_ERROR = (1 << 25), /* ERROR for the next channel */ 102 SIL_DMA_N_COMPLETE = (1 << 26), /* COMPLETE for the next channel */ 103 104 /* SIEN */ 105 SIL_SIEN_N = (1 << 16), /* triggered by SError.N */ 106 107 /* 108 * Others 109 */ 110 SIL_QUIRK_MOD15WRITE = (1 << 0), 111 SIL_QUIRK_UDMA5MAX = (1 << 1), 
112}; 113 114static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 115#ifdef CONFIG_PM 116static int sil_pci_device_resume(struct pci_dev *pdev); 117#endif 118static void sil_dev_config(struct ata_device *dev); 119static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 120static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 121static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed); 122static void sil_freeze(struct ata_port *ap); 123static void sil_thaw(struct ata_port *ap); 124 125 126static const struct pci_device_id sil_pci_tbl[] = { 127 { PCI_VDEVICE(CMD, 0x3112), sil_3112 }, 128 { PCI_VDEVICE(CMD, 0x0240), sil_3112 }, 129 { PCI_VDEVICE(CMD, 0x3512), sil_3512 }, 130 { PCI_VDEVICE(CMD, 0x3114), sil_3114 }, 131 { PCI_VDEVICE(ATI, 0x436e), sil_3112 }, 132 { PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq }, 133 { PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq }, 134 135 { } /* terminate list */ 136}; 137 138 139/* TODO firmware versions should be added - eric */ 140static const struct sil_drivelist { 141 const char *product; 142 unsigned int quirk; 143} sil_blacklist [] = { 144 { "ST320012AS", SIL_QUIRK_MOD15WRITE }, 145 { "ST330013AS", SIL_QUIRK_MOD15WRITE }, 146 { "ST340017AS", SIL_QUIRK_MOD15WRITE }, 147 { "ST360015AS", SIL_QUIRK_MOD15WRITE }, 148 { "ST380023AS", SIL_QUIRK_MOD15WRITE }, 149 { "ST3120023AS", SIL_QUIRK_MOD15WRITE }, 150 { "ST340014ASL", SIL_QUIRK_MOD15WRITE }, 151 { "ST360014ASL", SIL_QUIRK_MOD15WRITE }, 152 { "ST380011ASL", SIL_QUIRK_MOD15WRITE }, 153 { "ST3120022ASL", SIL_QUIRK_MOD15WRITE }, 154 { "ST3160021ASL", SIL_QUIRK_MOD15WRITE }, 155 { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX }, 156 { } 157}; 158 159static struct pci_driver sil_pci_driver = { 160 .name = DRV_NAME, 161 .id_table = sil_pci_tbl, 162 .probe = sil_init_one, 163 .remove = ata_pci_remove_one, 164#ifdef CONFIG_PM 165 .suspend = ata_pci_device_suspend, 166 .resume = sil_pci_device_resume, 
167#endif 168}; 169 170static struct scsi_host_template sil_sht = { 171 .module = THIS_MODULE, 172 .name = DRV_NAME, 173 .ioctl = ata_scsi_ioctl, 174 .queuecommand = ata_scsi_queuecmd, 175 .can_queue = ATA_DEF_QUEUE, 176 .this_id = ATA_SHT_THIS_ID, 177 .sg_tablesize = LIBATA_MAX_PRD, 178 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 179 .emulated = ATA_SHT_EMULATED, 180 .use_clustering = ATA_SHT_USE_CLUSTERING, 181 .proc_name = DRV_NAME, 182 .dma_boundary = ATA_DMA_BOUNDARY, 183 .slave_configure = ata_scsi_slave_config, 184 .slave_destroy = ata_scsi_slave_destroy, 185 .bios_param = ata_std_bios_param, 186}; 187 188static const struct ata_port_operations sil_ops = { 189 .dev_config = sil_dev_config, 190 .tf_load = ata_tf_load, 191 .tf_read = ata_tf_read, 192 .check_status = ata_check_status, 193 .exec_command = ata_exec_command, 194 .dev_select = ata_std_dev_select, 195 .set_mode = sil_set_mode, 196 .bmdma_setup = ata_bmdma_setup, 197 .bmdma_start = ata_bmdma_start, 198 .bmdma_stop = ata_bmdma_stop, 199 .bmdma_status = ata_bmdma_status, 200 .qc_prep = ata_qc_prep, 201 .qc_issue = ata_qc_issue_prot, 202 .data_xfer = ata_data_xfer, 203 .freeze = sil_freeze, 204 .thaw = sil_thaw, 205 .error_handler = ata_bmdma_error_handler, 206 .post_internal_cmd = ata_bmdma_post_internal_cmd, 207 .irq_clear = ata_bmdma_irq_clear, 208 .irq_on = ata_irq_on, 209 .scr_read = sil_scr_read, 210 .scr_write = sil_scr_write, 211 .port_start = ata_port_start, 212}; 213 214static const struct ata_port_info sil_port_info[] = { 215 /* sil_3112 */ 216 { 217 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE, 218 .link_flags = SIL_DFL_LINK_FLAGS, 219 .pio_mask = 0x1f, /* pio0-4 */ 220 .mwdma_mask = 0x07, /* mwdma0-2 */ 221 .udma_mask = ATA_UDMA5, 222 .port_ops = &sil_ops, 223 }, 224 /* sil_3112_no_sata_irq */ 225 { 226 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE | 227 SIL_FLAG_NO_SATA_IRQ, 228 .link_flags = SIL_DFL_LINK_FLAGS, 229 .pio_mask = 0x1f, /* pio0-4 */ 230 .mwdma_mask = 0x07, /* mwdma0-2 */ 231 
.udma_mask = ATA_UDMA5, 232 .port_ops = &sil_ops, 233 }, 234 /* sil_3512 */ 235 { 236 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, 237 .link_flags = SIL_DFL_LINK_FLAGS, 238 .pio_mask = 0x1f, /* pio0-4 */ 239 .mwdma_mask = 0x07, /* mwdma0-2 */ 240 .udma_mask = ATA_UDMA5, 241 .port_ops = &sil_ops, 242 }, 243 /* sil_3114 */ 244 { 245 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, 246 .link_flags = SIL_DFL_LINK_FLAGS, 247 .pio_mask = 0x1f, /* pio0-4 */ 248 .mwdma_mask = 0x07, /* mwdma0-2 */ 249 .udma_mask = ATA_UDMA5, 250 .port_ops = &sil_ops, 251 }, 252}; 253 254/* per-port register offsets */ 255/* TODO: we can probably calculate rather than use a table */ 256static const struct { 257 unsigned long tf; /* ATA taskfile register block */ 258 unsigned long ctl; /* ATA control/altstatus register block */ 259 unsigned long bmdma; /* DMA register block */ 260 unsigned long bmdma2; /* DMA register block #2 */ 261 unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */ 262 unsigned long scr; /* SATA control register block */ 263 unsigned long sien; /* SATA Interrupt Enable register */ 264 unsigned long xfer_mode;/* data transfer mode register */ 265 unsigned long sfis_cfg; /* SATA FIS reception config register */ 266} sil_port[] = { 267 /* port 0 ... */ 268 /* tf ctl bmdma bmdma2 fifo scr sien mode sfis */ 269 { 0x80, 0x8A, 0x0, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c }, 270 { 0xC0, 0xCA, 0x8, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc }, 271 { 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c }, 272 { 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc }, 273 /* ... 
port 3 */ 274}; 275 276MODULE_AUTHOR("Jeff Garzik"); 277MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller"); 278MODULE_LICENSE("GPL"); 279MODULE_DEVICE_TABLE(pci, sil_pci_tbl); 280MODULE_VERSION(DRV_VERSION); 281 282static int slow_down; 283module_param(slow_down, int, 0444); 284MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)"); 285 286 287static unsigned char sil_get_device_cache_line(struct pci_dev *pdev) 288{ 289 u8 cache_line = 0; 290 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line); 291 return cache_line; 292} 293 294/** 295 * sil_set_mode - wrap set_mode functions 296 * @link: link to set up 297 * @r_failed: returned device when we fail 298 * 299 * Wrap the libata method for device setup as after the setup we need 300 * to inspect the results and do some configuration work 301 */ 302 303static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed) 304{ 305 struct ata_port *ap = link->ap; 306 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR]; 307 void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode; 308 struct ata_device *dev; 309 u32 tmp, dev_mode[2] = { }; 310 int rc; 311 312 rc = ata_do_set_mode(link, r_failed); 313 if (rc) 314 return rc; 315 316 ata_link_for_each_dev(dev, link) { 317 if (!ata_dev_enabled(dev)) 318 dev_mode[dev->devno] = 0; /* PIO0/1/2 */ 319 else if (dev->flags & ATA_DFLAG_PIO) 320 dev_mode[dev->devno] = 1; /* PIO3/4 */ 321 else 322 dev_mode[dev->devno] = 3; /* UDMA */ 323 /* value 2 indicates MDMA */ 324 } 325 326 tmp = readl(addr); 327 tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0)); 328 tmp |= dev_mode[0]; 329 tmp |= (dev_mode[1] << 4); 330 writel(tmp, addr); 331 readl(addr); /* flush */ 332 return 0; 333} 334 335static inline void __iomem *sil_scr_addr(struct ata_port *ap, 336 unsigned int sc_reg) 337{ 338 void __iomem *offset = ap->ioaddr.scr_addr; 339 340 switch (sc_reg) { 341 case SCR_STATUS: 
342 return offset + 4; 343 case SCR_ERROR: 344 return offset + 8; 345 case SCR_CONTROL: 346 return offset; 347 default: 348 /* do nothing */ 349 break; 350 } 351 352 return NULL; 353} 354 355static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 356{ 357 void __iomem *mmio = sil_scr_addr(ap, sc_reg); 358 359 if (mmio) { 360 *val = readl(mmio); 361 return 0; 362 } 363 return -EINVAL; 364} 365 366static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 367{ 368 void __iomem *mmio = sil_scr_addr(ap, sc_reg); 369 370 if (mmio) { 371 writel(val, mmio); 372 return 0; 373 } 374 return -EINVAL; 375} 376 377static void sil_host_intr(struct ata_port *ap, u32 bmdma2) 378{ 379 struct ata_eh_info *ehi = &ap->link.eh_info; 380 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); 381 u8 status; 382 383 if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) { 384 u32 serror; 385 386 /* SIEN doesn't mask SATA IRQs on some 3112s. Those 387 * controllers continue to assert IRQ as long as 388 * SError bits are pending. Clear SError immediately. 389 */ 390 sil_scr_read(ap, SCR_ERROR, &serror); 391 sil_scr_write(ap, SCR_ERROR, serror); 392 393 /* Sometimes spurious interrupts occur, double check 394 * it's PHYRDY CHG. 395 */ 396 if (serror & SERR_PHYRDY_CHG) { 397 /* Trigger hotplug and accumulate SError only 398 * if the port isn't already frozen. 399 * Otherwise, PHY events during hardreset 400 * makes controllers with broken SIEN repeat 401 * probing needlessly. 
402 */ 403 if (!(ap->pflags & ATA_PFLAG_FROZEN)) { 404 ata_ehi_hotplugged(&ap->link.eh_info); 405 ap->link.eh_info.serror |= serror; 406 } 407 goto freeze; 408 } 409 410 if (!(bmdma2 & SIL_DMA_COMPLETE)) 411 return; 412 } 413 414 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { 415 /* this sometimes happens, just clear IRQ */ 416 ata_chk_status(ap); 417 return; 418 } 419 420 /* Check whether we are expecting interrupt in this state */ 421 switch (ap->hsm_task_state) { 422 case HSM_ST_FIRST: 423 /* Some pre-ATAPI-4 devices assert INTRQ 424 * at this state when ready to receive CDB. 425 */ 426 427 /* Check the ATA_DFLAG_CDB_INTR flag is enough here. 428 * The flag was turned on only for atapi devices. 429 * No need to check is_atapi_taskfile(&qc->tf) again. 430 */ 431 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 432 goto err_hsm; 433 break; 434 case HSM_ST_LAST: 435 if (qc->tf.protocol == ATA_PROT_DMA || 436 qc->tf.protocol == ATA_PROT_ATAPI_DMA) { 437 /* clear DMA-Start bit */ 438 ap->ops->bmdma_stop(qc); 439 440 if (bmdma2 & SIL_DMA_ERROR) { 441 qc->err_mask |= AC_ERR_HOST_BUS; 442 ap->hsm_task_state = HSM_ST_ERR; 443 } 444 } 445 break; 446 case HSM_ST: 447 break; 448 default: 449 goto err_hsm; 450 } 451 452 /* check main status, clearing INTRQ */ 453 status = ata_chk_status(ap); 454 if (unlikely(status & ATA_BUSY)) 455 goto err_hsm; 456 457 /* ack bmdma irq events */ 458 ata_bmdma_irq_clear(ap); 459 460 /* kick HSM in the ass */ 461 ata_hsm_move(ap, qc, status, 0); 462 463 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA || 464 qc->tf.protocol == ATA_PROT_ATAPI_DMA)) 465 ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2); 466 467 return; 468 469 err_hsm: 470 qc->err_mask |= AC_ERR_HSM; 471 freeze: 472 ata_port_freeze(ap); 473} 474 475static irqreturn_t sil_interrupt(int irq, void *dev_instance) 476{ 477 struct ata_host *host = dev_instance; 478 void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR]; 479 int handled = 0; 480 int i; 481 482 
spin_lock(&host->lock); 483 484 for (i = 0; i < host->n_ports; i++) { 485 struct ata_port *ap = host->ports[i]; 486 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2); 487 488 if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED)) 489 continue; 490 491 /* turn off SATA_IRQ if not supported */ 492 if (ap->flags & SIL_FLAG_NO_SATA_IRQ) 493 bmdma2 &= ~SIL_DMA_SATA_IRQ; 494 495 if (bmdma2 == 0xffffffff || 496 !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ))) 497 continue; 498 499 sil_host_intr(ap, bmdma2); 500 handled = 1; 501 } 502 503 spin_unlock(&host->lock); 504 505 return IRQ_RETVAL(handled); 506} 507 508static void sil_freeze(struct ata_port *ap) 509{ 510 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR]; 511 u32 tmp; 512 513 /* global IRQ mask doesn't block SATA IRQ, turn off explicitly */ 514 writel(0, mmio_base + sil_port[ap->port_no].sien); 515 516 /* plug IRQ */ 517 tmp = readl(mmio_base + SIL_SYSCFG); 518 tmp |= SIL_MASK_IDE0_INT << ap->port_no; 519 writel(tmp, mmio_base + SIL_SYSCFG); 520 readl(mmio_base + SIL_SYSCFG); /* flush */ 521} 522 523static void sil_thaw(struct ata_port *ap) 524{ 525 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR]; 526 u32 tmp; 527 528 /* clear IRQ */ 529 ata_chk_status(ap); 530 ata_bmdma_irq_clear(ap); 531 532 /* turn on SATA IRQ if supported */ 533 if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ)) 534 writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien); 535 536 /* turn on IRQ */ 537 tmp = readl(mmio_base + SIL_SYSCFG); 538 tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no); 539 writel(tmp, mmio_base + SIL_SYSCFG); 540} 541 542/** 543 * sil_dev_config - Apply device/host-specific errata fixups 544 * @dev: Device to be examined 545 * 546 * After the IDENTIFY [PACKET] DEVICE step is complete, and a 547 * device is known to be present, this function is called. 548 * We apply two errata fixups which are specific to Silicon Image, 549 * a Seagate and a Maxtor fixup. 
550 * 551 * For certain Seagate devices, we must limit the maximum sectors 552 * to under 8K. 553 * 554 * For certain Maxtor devices, we must not program the drive 555 * beyond udma5. 556 * 557 * Both fixups are unfairly pessimistic. As soon as I get more 558 * information on these errata, I will create a more exhaustive 559 * list, and apply the fixups to only the specific 560 * devices/hosts/firmwares that need it. 561 * 562 * 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted 563 * The Maxtor quirk is in the blacklist, but I'm keeping the original 564 * pessimistic fix for the following reasons... 565 * - There seems to be less info on it, only one device gleaned off the 566 * Windows driver, maybe only one is affected. More info would be greatly 567 * appreciated. 568 * - But then again UDMA5 is hardly anything to complain about 569 */ 570static void sil_dev_config(struct ata_device *dev) 571{ 572 struct ata_port *ap = dev->link->ap; 573 int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO; 574 unsigned int n, quirks = 0; 575 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 576 577 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 578 579 for (n = 0; sil_blacklist[n].product; n++) 580 if (!strcmp(sil_blacklist[n].product, model_num)) { 581 quirks = sil_blacklist[n].quirk; 582 break; 583 } 584 585 /* limit requests to 15 sectors */ 586 if (slow_down || 587 ((ap->flags & SIL_FLAG_MOD15WRITE) && 588 (quirks & SIL_QUIRK_MOD15WRITE))) { 589 if (print_info) 590 ata_dev_printk(dev, KERN_INFO, "applying Seagate " 591 "errata fix (mod15write workaround)\n"); 592 dev->max_sectors = 15; 593 return; 594 } 595 596 /* limit to udma5 */ 597 if (quirks & SIL_QUIRK_UDMA5MAX) { 598 if (print_info) 599 ata_dev_printk(dev, KERN_INFO, "applying Maxtor " 600 "errata fix %s\n", model_num); 601 dev->udma_mask &= ATA_UDMA5; 602 return; 603 } 604} 605 606static void sil_init_controller(struct ata_host *host) 607{ 608 struct pci_dev *pdev 
= to_pci_dev(host->dev); 609 void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR]; 610 u8 cls; 611 u32 tmp; 612 int i; 613 614 /* Initialize FIFO PCI bus arbitration */ 615 cls = sil_get_device_cache_line(pdev); 616 if (cls) { 617 cls >>= 3; 618 cls++; /* cls = (line_size/8)+1 */ 619 for (i = 0; i < host->n_ports; i++) 620 writew(cls << 8 | cls, 621 mmio_base + sil_port[i].fifo_cfg); 622 } else 623 dev_printk(KERN_WARNING, &pdev->dev, 624 "cache line size not set. Driver may not function\n"); 625 626 /* Apply R_ERR on DMA activate FIS errata workaround */ 627 if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) { 628 int cnt; 629 630 for (i = 0, cnt = 0; i < host->n_ports; i++) { 631 tmp = readl(mmio_base + sil_port[i].sfis_cfg); 632 if ((tmp & 0x3) != 0x01) 633 continue; 634 if (!cnt) 635 dev_printk(KERN_INFO, &pdev->dev, 636 "Applying R_ERR on DMA activate " 637 "FIS errata fix\n"); 638 writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg); 639 cnt++; 640 } 641 } 642 643 if (host->n_ports == 4) { 644 /* flip the magic "make 4 ports work" bit */ 645 tmp = readl(mmio_base + sil_port[2].bmdma); 646 if ((tmp & SIL_INTR_STEERING) == 0) 647 writel(tmp | SIL_INTR_STEERING, 648 mmio_base + sil_port[2].bmdma); 649 } 650} 651 652static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 653{ 654 static int printed_version; 655 int board_id = ent->driver_data; 656 const struct ata_port_info *ppi[] = { &sil_port_info[board_id], NULL }; 657 struct ata_host *host; 658 void __iomem *mmio_base; 659 int n_ports, rc; 660 unsigned int i; 661 662 if (!printed_version++) 663 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 664 665 /* allocate host */ 666 n_ports = 2; 667 if (board_id == sil_3114) 668 n_ports = 4; 669 670 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 671 if (!host) 672 return -ENOMEM; 673 674 /* acquire resources and fill host */ 675 rc = pcim_enable_device(pdev); 676 if (rc) 677 return rc; 678 679 rc = 
pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME); 680 if (rc == -EBUSY) 681 pcim_pin_device(pdev); 682 if (rc) 683 return rc; 684 host->iomap = pcim_iomap_table(pdev); 685 686 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 687 if (rc) 688 return rc; 689 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 690 if (rc) 691 return rc; 692 693 mmio_base = host->iomap[SIL_MMIO_BAR]; 694 695 for (i = 0; i < host->n_ports; i++) { 696 struct ata_port *ap = host->ports[i]; 697 struct ata_ioports *ioaddr = &ap->ioaddr; 698 699 ioaddr->cmd_addr = mmio_base + sil_port[i].tf; 700 ioaddr->altstatus_addr = 701 ioaddr->ctl_addr = mmio_base + sil_port[i].ctl; 702 ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma; 703 ioaddr->scr_addr = mmio_base + sil_port[i].scr; 704 ata_std_ports(ioaddr); 705 706 ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio"); 707 ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf"); 708 } 709 710 /* initialize and activate */ 711 sil_init_controller(host); 712 713 pci_set_master(pdev); 714 return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED, 715 &sil_sht); 716} 717 718#ifdef CONFIG_PM 719static int sil_pci_device_resume(struct pci_dev *pdev) 720{ 721 struct ata_host *host = dev_get_drvdata(&pdev->dev); 722 int rc; 723 724 rc = ata_pci_device_do_resume(pdev); 725 if (rc) 726 return rc; 727 728 sil_init_controller(host); 729 ata_host_resume(host); 730 731 return 0; 732} 733#endif 734 735static int __init sil_init(void) 736{ 737 return pci_register_driver(&sil_pci_driver); 738} 739 740static void __exit sil_exit(void) 741{ 742 pci_unregister_driver(&sil_pci_driver); 743} 744 745 746module_init(sil_init); 747module_exit(sil_exit); 748