/*
 * Disk Array driver for HP Smart Array controllers.
 * (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 * 02111-1307, USA.
 *
 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkpg.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/hdreg.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/io.h>

#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/completion.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <scsi/scsi_ioctl.h>
#include <linux/cdrom.h>
#include <linux/scatterlist.h>
#include <linux/kthread.h>

#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
#define DRIVER_NAME "HP CISS Driver (v 3.6.26)"
#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 26)
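/*
 * Illustrative note (not part of the original source): the version macro
 * packs major/minor/subminor into one integer, one byte per component.
 * For example, CCISS_DRIVER_VERSION(3, 6, 26) evaluates to
 * (3 << 16) | (6 << 8) | 26 = 0x0003061A, which is the value the
 * CCISS_GETDRIVVER ioctl below reports to userspace.
 */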
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION("3.6.26");
MODULE_LICENSE("GPL");
static int cciss_tape_cmds = 6;
module_param(cciss_tape_cmds, int, 0644);
MODULE_PARM_DESC(cciss_tape_cmds,
	"number of commands to allocate for tape devices (default: 6)");
static int cciss_simple_mode;
module_param(cciss_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(cciss_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

static DEFINE_MUTEX(cciss_mutex);
static struct proc_dir_entry *proc_cciss;

#include "cciss_cmd.h"
#include "cciss.h"
#include <linux/cciss_ioctl.h>

/* define the PCI info for the cards we can control */
static const struct pci_device_id cciss_pci_device_id[] = {
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
	{0,}
};

MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);

/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x40700E11, "Smart Array 5300", &SA5_access},
	{0x40800E11, "Smart Array 5i", &SA5B_access},
	{0x40820E11, "Smart Array 532", &SA5B_access},
	{0x40830E11, "Smart Array 5312", &SA5B_access},
	{0x409A0E11, "Smart Array 641", &SA5_access},
	{0x409B0E11, "Smart Array 642", &SA5_access},
	{0x409C0E11, "Smart Array 6400", &SA5_access},
	{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
	{0x40910E11, "Smart Array 6i", &SA5_access},
	{0x3225103C, "Smart Array P600", &SA5_access},
	{0x3223103C, "Smart Array P800", &SA5_access},
	{0x3234103C, "Smart Array P400", &SA5_access},
	{0x3235103C, "Smart Array P400i", &SA5_access},
	{0x3211103C, "Smart Array E200i", &SA5_access},
	{0x3212103C, "Smart Array E200", &SA5_access},
	{0x3213103C, "Smart Array E200i", &SA5_access},
	{0x3214103C, "Smart Array E200i", &SA5_access},
	{0x3215103C, "Smart Array E200i", &SA5_access},
	{0x3237103C, "Smart Array E500", &SA5_access},
	{0x323D103C, "Smart Array P700m", &SA5_access},
};
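/*
 * Illustrative note (not part of the original source): board_id is built
 * by packing the PCI subsystem device ID into the high 16 bits and the
 * subsystem vendor ID into the low 16 bits.  For example, the Smart
 * Array P600 has subsystem vendor 0x103C and subsystem device 0x3225,
 * so its board_id is (0x3225 << 16) | 0x103C = 0x3225103C, matching the
 * products[] entry above.
 */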
/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

#define MAX_CTLR 32

/* Originally the cciss driver supported only 8 major numbers */
#define MAX_CTLR_ORIG 8

static ctlr_info_t *hba[MAX_CTLR];

static struct task_struct *cciss_scan_thread;
static DEFINE_MUTEX(scan_mutex);
static LIST_HEAD(scan_q);

static void do_cciss_request(struct request_queue *q);
static irqreturn_t do_cciss_intx(int irq, void *dev_id);
static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
static int cciss_open(struct block_device *bdev, fmode_t mode);
static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
static int cciss_release(struct gendisk *disk, fmode_t mode);
static int do_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg);
static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned int cmd, unsigned long arg);
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);

static int cciss_revalidate(struct gendisk *disk);
static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl);
static int deregister_disk(ctlr_info_t *h, int drv_index,
			   int clear_all, int via_ioctl);

static void cciss_read_capacity(ctlr_info_t *h, int logvol,
			sector_t *total_size, unsigned int *block_size);
static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
			sector_t *total_size, unsigned int *block_size);
static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
			sector_t total_size,
			unsigned int block_size, InquiryData_struct *inq_buff,
			drive_info_struct *drv);
static void __devinit cciss_interrupt_mode(ctlr_info_t *);
static int __devinit cciss_enter_simple_mode(struct ctlr_info *h);
static void start_io(ctlr_info_t *h);
static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
			__u8 page_code, unsigned char scsi3addr[],
			int cmd_type);
static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
	int attempt_retry);
static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);

static int add_to_scan_list(struct ctlr_info *h);
static int scan_thread(void *data);
static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
static void cciss_hba_release(struct device *dev);
static void cciss_device_release(struct device *dev);
static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
static inline u32 next_command(ctlr_info_t *h);
static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag);
static __devinit int write_driver_ver_to_cfgtable(
	CfgTable_struct __iomem *cfgtable);

/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets, int nsgs,
			int *bucket_map);
static void cciss_put_controller_into_performant_mode(ctlr_info_t *h);

#ifdef CONFIG_PROC_FS
static void cciss_procinit(ctlr_info_t *h);
#else
static void cciss_procinit(ctlr_info_t *h)
{
}
#endif				/* CONFIG_PROC_FS */

#ifdef CONFIG_COMPAT
static int cciss_compat_ioctl(struct block_device *, fmode_t,
			      unsigned, unsigned long);
#endif

static const struct block_device_operations cciss_fops = {
	.owner = THIS_MODULE,
	.open = cciss_unlocked_open,
	.release = cciss_release,
	.ioctl = do_ioctl,
	.getgeo = cciss_getgeo,
#ifdef CONFIG_COMPAT
	.compat_ioctl = cciss_compat_ioctl,
#endif
	.revalidate_disk = cciss_revalidate,
};

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant))
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}
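/*
 * Illustrative note (not part of the original source): in performant mode
 * the low bits of the command's bus address double as the tag handed to
 * the controller.  Bit 0 set selects the "pull" model, and bits 3-1
 * encode which block fetch register to use, indexed by the command's
 * scatter-gather count.  For example, with c->Header.SGList == 4 and
 * h->blockFetchTable[4] == 2, the tag gains 1 | (2 << 1) = 0x5.  Command
 * blocks are allocated with enough alignment that these low bits of
 * busaddr are otherwise zero.
 */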
/*
 * Enqueuing and dequeuing functions for cmdlists.
 */
static inline void addQ(struct list_head *list, CommandList_struct *c)
{
	list_add_tail(&c->list, list);
}

static inline void removeQ(CommandList_struct *c)
{
	/*
	 * After kexec/dump some commands might still
	 * be in flight, which the firmware will try
	 * to complete.  Resetting the firmware doesn't work
	 * with old fw revisions, so we have to mark
	 * them off as 'stale' to prevent the driver from
	 * falling over.
	 */
	if (WARN_ON(list_empty(&c->list))) {
		c->cmd_type = CMD_MSG_STALE;
		return;
	}

	list_del_init(&c->list);
}

static void enqueue_cmd_and_start_io(ctlr_info_t *h, CommandList_struct *c)
{
	unsigned long flags;

	set_performant_mode(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	if (h->Qdepth > h->maxQsinceinit)
		h->maxQsinceinit = h->Qdepth;
	start_io(h);
	spin_unlock_irqrestore(&h->lock, flags);
}

static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list,
	int nr_cmds)
{
	int i;

	if (!cmd_sg_list)
		return;
	for (i = 0; i < nr_cmds; i++) {
		kfree(cmd_sg_list[i]);
		cmd_sg_list[i] = NULL;
	}
	kfree(cmd_sg_list);
}

static SGDescriptor_struct **cciss_allocate_sg_chain_blocks(
	ctlr_info_t *h, int chainsize, int nr_cmds)
{
	int j;
	SGDescriptor_struct **cmd_sg_list;

	if (chainsize <= 0)
		return NULL;

	cmd_sg_list = kmalloc(sizeof(*cmd_sg_list) * nr_cmds, GFP_KERNEL);
	if (!cmd_sg_list)
		return NULL;

	/* Build up chain blocks for each command */
	for (j = 0; j < nr_cmds; j++) {
		/* Need a block of chainsized s/g elements. */
		cmd_sg_list[j] = kmalloc((chainsize *
			sizeof(*cmd_sg_list[j])), GFP_KERNEL);
		if (!cmd_sg_list[j]) {
			dev_err(&h->pdev->dev, "Cannot get memory "
				"for s/g chains.\n");
			goto clean;
		}
	}
	return cmd_sg_list;
clean:
	cciss_free_sg_chain_blocks(cmd_sg_list, nr_cmds);
	return NULL;
}

static void cciss_unmap_sg_chain_block(ctlr_info_t *h, CommandList_struct *c)
{
	SGDescriptor_struct *chain_sg;
	u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sgentries)
		return;

	chain_sg = &c->SG[h->max_cmd_sgentries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}

static void cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c,
	SGDescriptor_struct *chain_block, int len)
{
	SGDescriptor_struct *chain_sg;
	u64bit temp64;

	chain_sg = &c->SG[h->max_cmd_sgentries - 1];
	chain_sg->Ext = CCISS_SG_CHAIN;
	chain_sg->Len = len;
	temp64.val = pci_map_single(h->pdev, chain_block, len,
				PCI_DMA_TODEVICE);
	chain_sg->Addr.lower = temp64.val32.lower;
	chain_sg->Addr.upper = temp64.val32.upper;
}
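/*
 * Illustrative note (not part of the original source): when a request
 * needs more scatter-gather entries than fit in the command block, the
 * last embedded SG slot is turned into a "chain" descriptor (Ext ==
 * CCISS_SG_CHAIN) whose address/length point at a separately allocated
 * block of additional SG entries.  For example, if max_cmd_sgentries is
 * 32 and a request has 40 segments, entries 0-30 live in c->SG[], and
 * c->SG[31] chains to a block holding the remaining 9 descriptors.
 */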
#include "cciss_scsi.c"		/* For SCSI tape support */

static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label)-1)

#ifdef CONFIG_PROC_FS

/*
 * Report information about this controller.
 */
#define ENG_GIG 1000000000
#define ENG_GIG_FACTOR (ENG_GIG/512)
#define ENGAGE_SCSI "engage scsi"

static void cciss_seq_show_header(struct seq_file *seq)
{
	ctlr_info_t *h = seq->private;

	seq_printf(seq, "%s: HP %s Controller\n"
		"Board ID: 0x%08lx\n"
		"Firmware Version: %c%c%c%c\n"
		"IRQ: %d\n"
		"Logical drives: %d\n"
		"Current Q depth: %d\n"
		"Current # commands on controller: %d\n"
		"Max Q depth since init: %d\n"
		"Max # commands on controller since init: %d\n"
		"Max SG entries since init: %d\n",
		h->devname,
		h->product_name,
		(unsigned long)h->board_id,
		h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
		h->firm_ver[3], (unsigned int)h->intr[h->intr_mode],
		h->num_luns,
		h->Qdepth, h->commands_outstanding,
		h->maxQsinceinit, h->max_outstanding, h->maxSG);

#ifdef CONFIG_CISS_SCSI_TAPE
	cciss_seq_tape_report(seq, h);
#endif /* CONFIG_CISS_SCSI_TAPE */
}

static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
{
	ctlr_info_t *h = seq->private;
	unsigned long flags;

	/* prevent displaying bogus info during configuration
	 * or deconfiguration of a logical volume
	 */
	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(&h->lock, flags);
		return ERR_PTR(-EBUSY);
	}
	h->busy_configuring = 1;
	spin_unlock_irqrestore(&h->lock, flags);

	if (*pos == 0)
		cciss_seq_show_header(seq);

	return pos;
}

static int cciss_seq_show(struct seq_file *seq, void *v)
{
	sector_t vol_sz, vol_sz_frac;
	ctlr_info_t *h = seq->private;
	unsigned ctlr = h->ctlr;
	loff_t *pos = v;
	drive_info_struct *drv = h->drv[*pos];

	if (*pos > h->highest_lun)
		return 0;

	if (drv == NULL)	/* it's possible for h->drv[] to have holes. */
		return 0;

	if (drv->heads == 0)
		return 0;

	vol_sz = drv->nr_blocks;
	vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
	vol_sz_frac *= 100;
	sector_div(vol_sz_frac, ENG_GIG_FACTOR);

	if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN)
		drv->raid_level = RAID_UNKNOWN;
	seq_printf(seq, "cciss/c%dd%d:"
			"\t%4u.%02uGB\tRAID %s\n",
			ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
			raid_label[drv->raid_level]);
	return 0;
}
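/*
 * Illustrative note (not part of the original source): ENG_GIG_FACTOR is
 * 1000000000 / 512 = 1953125, the number of 512-byte sectors in a
 * decimal gigabyte.  The two sector_div() calls above produce a whole
 * and a fractional part: for a volume of 143305920 sectors,
 * 143305920 / 1953125 = 73 remainder 727795, and
 * 727795 * 100 / 1953125 = 37, so /proc/driver/cciss reports 73.37GB.
 */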
static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	ctlr_info_t *h = seq->private;

	if (*pos > h->highest_lun)
		return NULL;
	*pos += 1;

	return pos;
}

static void cciss_seq_stop(struct seq_file *seq, void *v)
{
	ctlr_info_t *h = seq->private;

	/* Only reset h->busy_configuring if we succeeded in setting
	 * it during cciss_seq_start. */
	if (v == ERR_PTR(-EBUSY))
		return;

	h->busy_configuring = 0;
}

static const struct seq_operations cciss_seq_ops = {
	.start = cciss_seq_start,
	.show = cciss_seq_show,
	.next = cciss_seq_next,
	.stop = cciss_seq_stop,
};

static int cciss_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &cciss_seq_ops);
	struct seq_file *seq = file->private_data;

	if (!ret)
		seq->private = PDE(inode)->data;

	return ret;
}

static ssize_t
cciss_proc_write(struct file *file, const char __user *buf,
		 size_t length, loff_t *ppos)
{
	int err;
	char *buffer;

#ifndef CONFIG_CISS_SCSI_TAPE
	return -EINVAL;
#endif

	if (!buf || length > PAGE_SIZE - 1)
		return -EINVAL;

	buffer = (char *)__get_free_page(GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(buffer, buf, length))
		goto out;
	buffer[length] = '\0';

#ifdef CONFIG_CISS_SCSI_TAPE
	if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
		struct seq_file *seq = file->private_data;
		ctlr_info_t *h = seq->private;

		err = cciss_engage_scsi(h);
		if (err == 0)
			err = length;
	} else
#endif /* CONFIG_CISS_SCSI_TAPE */
		err = -EINVAL;
	/* might be nice to have "disengage" too, but it's not
	   safely possible. (only 1 module use count, lock issues.) */

out:
	free_page((unsigned long)buffer);
	return err;
}

static const struct file_operations cciss_proc_fops = {
	.owner = THIS_MODULE,
	.open = cciss_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
	.write = cciss_proc_write,
};

static void __devinit cciss_procinit(ctlr_info_t *h)
{
	struct proc_dir_entry *pde;

	if (proc_cciss == NULL)
		proc_cciss = proc_mkdir("driver/cciss", NULL);
	if (!proc_cciss)
		return;
	pde = proc_create_data(h->devname, S_IWUSR | S_IRUSR | S_IRGRP |
					S_IROTH, proc_cciss,
					&cciss_proc_fops, h);
}
#endif				/* CONFIG_PROC_FS */

#define MAX_PRODUCT_NAME_LEN 19

#define to_hba(n) container_of(n, struct ctlr_info, dev)
#define to_drv(n) container_of(n, drive_info_struct, dev)

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct ctlr_info *h = to_hba(dev);

	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL);

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h = to_hba(dev);

	add_to_scan_list(h);
	wake_up_process(cciss_scan_thread);
	wait_for_completion_interruptible(&h->scan_wait);

	return count;
}
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);

static ssize_t host_show_transport_mode(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ctlr_info *h = to_hba(dev);

	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}
static DEVICE_ATTR(transport_mode, S_IRUGO, host_show_transport_mode, NULL);

static ssize_t dev_show_unique_id(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	__u8 sn[16];
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring)
		ret = -EBUSY;
	else
		memcpy(sn, drv->serial_no, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);

	if (ret)
		return ret;
	else
		return snprintf(buf, 16 * 2 + 2,
				"%02X%02X%02X%02X%02X%02X%02X%02X"
				"%02X%02X%02X%02X%02X%02X%02X%02X\n",
				sn[0], sn[1], sn[2], sn[3],
				sn[4], sn[5], sn[6], sn[7],
				sn[8], sn[9], sn[10], sn[11],
				sn[12], sn[13], sn[14], sn[15]);
}
static DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);

static ssize_t dev_show_vendor(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	char vendor[VENDOR_LEN + 1];
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring)
		ret = -EBUSY;
	else
		memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
	spin_unlock_irqrestore(&h->lock, flags);

	if (ret)
		return ret;
	else
		return snprintf(buf, sizeof(vendor) + 1, "%s\n", vendor);
}
static DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);

static ssize_t dev_show_model(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	char model[MODEL_LEN + 1];
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring)
		ret = -EBUSY;
	else
		memcpy(model, drv->model, MODEL_LEN + 1);
	spin_unlock_irqrestore(&h->lock, flags);

	if (ret)
		return ret;
	else
		return snprintf(buf, sizeof(model) + 1, "%s\n", model);
}
static DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);

static ssize_t dev_show_rev(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	char rev[REV_LEN + 1];
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring)
		ret = -EBUSY;
	else
		memcpy(rev, drv->rev, REV_LEN + 1);
	spin_unlock_irqrestore(&h->lock, flags);

	if (ret)
		return ret;
	else
		return snprintf(buf, sizeof(rev) + 1, "%s\n", rev);
}
static DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);

static ssize_t cciss_show_lunid(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	unsigned long flags;
	unsigned char lunid[8];

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -EBUSY;
	}
	if (!drv->heads) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENOTTY;
	}
	memcpy(lunid, drv->LunID, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}
static DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL);

static ssize_t cciss_show_raid_level(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	int raid;
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -EBUSY;
	}
	raid = drv->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (raid < 0 || raid > RAID_UNKNOWN)
		raid = RAID_UNKNOWN;

	return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n",
			raid_label[raid]);
}
static DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL);

static ssize_t cciss_show_usage_count(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	unsigned long flags;
	int count;

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -EBUSY;
	}
	count = drv->usage_count;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", count);
}
static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);

static struct attribute *cciss_host_attrs[] = {
	&dev_attr_rescan.attr,
	&dev_attr_resettable.attr,
	&dev_attr_transport_mode.attr,
	NULL
};

static struct attribute_group cciss_host_attr_group = {
	.attrs = cciss_host_attrs,
};

static const struct attribute_group *cciss_host_attr_groups[] = {
	&cciss_host_attr_group,
	NULL
};

static struct device_type cciss_host_type = {
	.name		= "cciss_host",
	.groups		= cciss_host_attr_groups,
	.release	= cciss_hba_release,
};

static struct attribute *cciss_dev_attrs[] = {
	&dev_attr_unique_id.attr,
	&dev_attr_model.attr,
	&dev_attr_vendor.attr,
	&dev_attr_rev.attr,
	&dev_attr_lunid.attr,
	&dev_attr_raid_level.attr,
	&dev_attr_usage_count.attr,
	NULL
};

static struct attribute_group cciss_dev_attr_group = {
	.attrs = cciss_dev_attrs,
};

static const struct attribute_group *cciss_dev_attr_groups[] = {
	&cciss_dev_attr_group,
	NULL
};

static struct device_type cciss_dev_type = {
	.name		= "cciss_device",
	.groups		= cciss_dev_attr_groups,
	.release	= cciss_device_release,
};

static struct bus_type cciss_bus_type = {
	.name		= "cciss",
};

/*
 * cciss_hba_release is called when the reference count
 * of h->dev goes to zero.
 */
static void cciss_hba_release(struct device *dev)
{
	/*
	 * nothing to do, but need this to avoid a warning
	 * about not having a release handler from lib/kref.c.
	 */
}

/*
 * Initialize sysfs entry for each controller.  This sets up and registers
 * the 'cciss#' directory for each individual controller under
 * /sys/bus/pci/devices/<dev>/.
 */
static int cciss_create_hba_sysfs_entry(struct ctlr_info *h)
{
	device_initialize(&h->dev);
	h->dev.type = &cciss_host_type;
	h->dev.bus = &cciss_bus_type;
	dev_set_name(&h->dev, "%s", h->devname);
	h->dev.parent = &h->pdev->dev;

	return device_add(&h->dev);
}

/*
 * Remove sysfs entries for an hba.
 */
static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
{
	device_del(&h->dev);
	put_device(&h->dev); /* final put. */
}

/* cciss_device_release is called when the reference count
 * of h->drv[x]->dev goes to zero.
 */
static void cciss_device_release(struct device *dev)
{
	drive_info_struct *drv = to_drv(dev);
	kfree(drv);
}

/*
 * Initialize sysfs for each logical drive.  This sets up and registers
 * the 'c#d#' directory for each individual logical drive under
 * /sys/bus/pci/devices/<dev>/cciss#/.  We also create a link from
 * /sys/block/cciss!c#d# to this entry.
 */
static long cciss_create_ld_sysfs_entry(struct ctlr_info *h,
				       int drv_index)
{
	struct device *dev;

	if (h->drv[drv_index]->device_initialized)
		return 0;

	dev = &h->drv[drv_index]->dev;
	device_initialize(dev);
	dev->type = &cciss_dev_type;
	dev->bus = &cciss_bus_type;
	dev_set_name(dev, "c%dd%d", h->ctlr, drv_index);
	dev->parent = &h->dev;
	h->drv[drv_index]->device_initialized = 1;
	return device_add(dev);
}

/*
 * Remove sysfs entries for a logical drive.
 */
static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index,
	int ctlr_exiting)
{
	struct device *dev = &h->drv[drv_index]->dev;

	/* special case for c*d0, we only destroy it on controller exit */
	if (drv_index == 0 && !ctlr_exiting)
		return;

	device_del(dev);
	put_device(dev); /* the "final" put. */
	h->drv[drv_index] = NULL;
}
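/*
 * Illustrative note (not part of the original source): the resulting
 * sysfs tree for controller 0 with two logical drives looks roughly like
 *
 *   /sys/bus/pci/devices/<dev>/cciss0/        (host attributes: rescan,
 *                                              resettable, transport_mode)
 *   /sys/bus/pci/devices/<dev>/cciss0/c0d0/   (lunid, model, vendor, rev,
 *                                              unique_id, raid_level,
 *                                              usage_count)
 *   /sys/bus/pci/devices/<dev>/cciss0/c0d1/
 *
 * with /sys/block/cciss!c0d0 linking back to the c0d0 entry.
 */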
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.
 */
static CommandList_struct *cmd_alloc(ctlr_info_t *h)
{
	CommandList_struct *c;
	int i;
	u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;

	do {
		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
		if (i == h->nr_cmds)
			return NULL;
	} while (test_and_set_bit(i & (BITS_PER_LONG - 1),
		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
	c = h->cmd_pool + i;
	memset(c, 0, sizeof(CommandList_struct));
	cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct);
	c->err_info = h->errinfo_pool + i;
	memset(c->err_info, 0, sizeof(ErrorInfo_struct));
	err_dma_handle = h->errinfo_pool_dhandle
	    + i * sizeof(ErrorInfo_struct);
	h->nr_allocs++;

	c->cmdindex = i;

	INIT_LIST_HEAD(&c->list);
	c->busaddr = (__u32) cmd_dma_handle;
	temp64.val = (__u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(ErrorInfo_struct);

	c->ctlr = h->ctlr;
	return c;
}
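/*
 * Illustrative note (not part of the original source): the alloc loop
 * above is a lock-free bitmap reservation.  find_first_zero_bit()
 * proposes a free index, and the atomic test_and_set_bit() claims it.
 * If two CPUs both see, say, index 5 free, only one test_and_set_bit()
 * returns zero; the loser loops, sees bit 5 now set, and claims the
 * next free index instead.
 */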
/* allocate a command using pci_alloc_consistent, used for ioctls,
 * etc., not for the main i/o path.
 */
static CommandList_struct *cmd_special_alloc(ctlr_info_t *h)
{
	CommandList_struct *c;
	u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;

	c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
		sizeof(CommandList_struct), &cmd_dma_handle);
	if (c == NULL)
		return NULL;
	memset(c, 0, sizeof(CommandList_struct));

	c->cmdindex = -1;

	c->err_info = (ErrorInfo_struct *)
	    pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
		    &err_dma_handle);

	if (c->err_info == NULL) {
		pci_free_consistent(h->pdev,
			sizeof(CommandList_struct), c, cmd_dma_handle);
		return NULL;
	}
	memset(c->err_info, 0, sizeof(ErrorInfo_struct));

	INIT_LIST_HEAD(&c->list);
	c->busaddr = (__u32) cmd_dma_handle;
	temp64.val = (__u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(ErrorInfo_struct);

	c->ctlr = h->ctlr;
	return c;
}

static void cmd_free(ctlr_info_t *h, CommandList_struct *c)
{
	int i;

	i = c - h->cmd_pool;
	clear_bit(i & (BITS_PER_LONG - 1),
		  h->cmd_pool_bits + (i / BITS_PER_LONG));
	h->nr_frees++;
}

static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c)
{
	u64bit temp64;

	temp64.val32.lower = c->ErrDesc.Addr.lower;
	temp64.val32.upper = c->ErrDesc.Addr.upper;
	pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
			    c->err_info, (dma_addr_t) temp64.val);
	pci_free_consistent(h->pdev, sizeof(CommandList_struct), c,
		(dma_addr_t) cciss_tag_discard_error_bits(h, (u32) c->busaddr));
}

static inline ctlr_info_t *get_host(struct gendisk *disk)
{
	return disk->queue->queuedata;
}

static inline drive_info_struct *get_drv(struct gendisk *disk)
{
	return disk->private_data;
}

/*
 * Open.  Make sure the device is really there.
 */
static int cciss_open(struct block_device *bdev, fmode_t mode)
{
	ctlr_info_t *h = get_host(bdev->bd_disk);
	drive_info_struct *drv = get_drv(bdev->bd_disk);

	dev_dbg(&h->pdev->dev, "cciss_open %s\n", bdev->bd_disk->disk_name);
	if (drv->busy_configuring)
		return -EBUSY;
	/*
	 * Root is allowed to open raw volume zero even if it's not configured
	 * so array config can still work.  Root is also allowed to open any
	 * volume that has a LUN ID, so it can issue IOCTL to reread the
	 * disk information.  I don't think I really like this
	 * but I'm already using way too many device nodes to claim another
	 * one for "raw controller".
	 */
	if (drv->heads == 0) {
		if (MINOR(bdev->bd_dev) != 0) {	/* not node 0? */
			/* if not node 0 make sure it is a partition = 0 */
			if (MINOR(bdev->bd_dev) & 0x0f) {
				return -ENXIO;
				/* if it is, make sure we have a LUN ID */
			} else if (memcmp(drv->LunID, CTLR_LUNID,
				sizeof(drv->LunID))) {
				return -ENXIO;
			}
		}
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}
	drv->usage_count++;
	h->usage_count++;
	return 0;
}
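/*
 * Illustrative note (not part of the original source): minor numbers
 * encode both the logical drive and the partition.  With NWD_SHIFT == 4
 * (16 minors per drive, as defined in cciss.h), drive c0d2 owns minors
 * 32-47: minor 32 is the whole disk and minors 33-47 are partitions
 * 1-15.  That is why the check above treats a minor with any of the low
 * four bits set (MINOR(bdev->bd_dev) & 0x0f) as a partition node.
 */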
static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode)
{
	int ret;

	mutex_lock(&cciss_mutex);
	ret = cciss_open(bdev, mode);
	mutex_unlock(&cciss_mutex);

	return ret;
}

/*
 * Close.  Sync first.
 */
static int cciss_release(struct gendisk *disk, fmode_t mode)
{
	ctlr_info_t *h;
	drive_info_struct *drv;

	mutex_lock(&cciss_mutex);
	h = get_host(disk);
	drv = get_drv(disk);
	dev_dbg(&h->pdev->dev, "cciss_release %s\n", disk->disk_name);
	drv->usage_count--;
	h->usage_count--;
	mutex_unlock(&cciss_mutex);
	return 0;
}

static int do_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned cmd, unsigned long arg)
{
	int ret;

	mutex_lock(&cciss_mutex);
	ret = cciss_ioctl(bdev, mode, cmd, arg);
	mutex_unlock(&cciss_mutex);
	return ret;
}

#ifdef CONFIG_COMPAT

static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
				  unsigned cmd, unsigned long arg);
static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
				      unsigned cmd, unsigned long arg);

static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned cmd, unsigned long arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return do_ioctl(bdev, mode, cmd, arg);

	case CCISS_PASSTHRU32:
		return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return cciss_ioctl32_big_passthru(bdev, mode, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}

static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
				  unsigned cmd, unsigned long arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
				      unsigned cmd, unsigned long arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
#endif

static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	drive_info_struct *drv = get_drv(bdev->bd_disk);

	if (!drv->cylinders)
		return -ENXIO;

	geo->heads = drv->heads;
	geo->sectors = drv->sectors;
	geo->cylinders = drv->cylinders;
	return 0;
}

static void check_ioctl_unit_attention(ctlr_info_t *h, CommandList_struct *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void)check_for_unit_attention(h, c);
}

static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
{
	cciss_pci_info_struct pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(cciss_pci_info_struct)))
		return -EFAULT;
	return 0;
}

static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
{
	cciss_coalint_struct intinfo;

	if (!argp)
		return -EINVAL;
	intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
	intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
	if (copy_to_user(argp, &intinfo, sizeof(cciss_coalint_struct)))
		return -EFAULT;
	return 0;
}

static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
{
	cciss_coalint_struct intinfo;
	unsigned long flags;
	int i;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&intinfo, argp, sizeof(intinfo)))
		return -EFAULT;
	if ((intinfo.delay == 0) && (intinfo.count == 0))
		return -EINVAL;
	spin_lock_irqsave(&h->lock, flags);
	/* Update the field, and then ring the doorbell */
	writel(intinfo.delay, &(h->cfgtable->HostWrite.CoalIntDelay));
	writel(intinfo.count, &(h->cfgtable->HostWrite.CoalIntCount));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);

	for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
			break;
		udelay(1000); /* delay and try again */
	}
	spin_unlock_irqrestore(&h->lock, flags);
	if (i >= MAX_IOCTL_CONFIG_WAIT)
		return -EAGAIN;
	return 0;
}
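/*
 * Illustrative note (not part of the original source): cciss_setintinfo()
 * above shows the config-table handshake used throughout the driver:
 * write the new values into the host-write section of the config table,
 * ring the CFGTBL_ChangeReq bit in the SA5 doorbell register, then poll
 * the doorbell until the controller clears the bit to acknowledge the
 * change (here up to MAX_IOCTL_CONFIG_WAIT iterations of udelay(1000),
 * roughly one second).
 */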
static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
{
	NodeName_type NodeName;
	int i;

	if (!argp)
		return -EINVAL;
	for (i = 0; i < 16; i++)
		NodeName[i] = readb(&h->cfgtable->ServerName[i]);
	if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
		return -EFAULT;
	return 0;
}

static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
{
	NodeName_type NodeName;
	unsigned long flags;
	int i;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(NodeName, argp, sizeof(NodeName_type)))
		return -EFAULT;
	spin_lock_irqsave(&h->lock, flags);
	/* Update the field, and then ring the doorbell */
	for (i = 0; i < 16; i++)
		writeb(NodeName[i], &h->cfgtable->ServerName[i]);
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
			break;
		udelay(1000); /* delay and try again */
	}
	spin_unlock_irqrestore(&h->lock, flags);
	if (i >= MAX_IOCTL_CONFIG_WAIT)
		return -EAGAIN;
	return 0;
}

static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
{
	Heartbeat_type heartbeat;

	if (!argp)
		return -EINVAL;
	heartbeat = readl(&h->cfgtable->HeartBeat);
	if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
		return -EFAULT;
	return 0;
}

static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
{
	BusTypes_type BusTypes;

	if (!argp)
		return -EINVAL;
	BusTypes = readl(&h->cfgtable->BusTypes);
	if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
		return -EFAULT;
	return 0;
}

static int cciss_getfirmver(ctlr_info_t *h, void __user *argp)
{
	FirmwareVer_type firmware;

	if (!argp)
		return -EINVAL;
	memcpy(firmware, h->firm_ver, 4);

	if (copy_to_user(argp, firmware, sizeof(FirmwareVer_type)))
		return -EFAULT;
	return 0;
}

static int cciss_getdrivver(ctlr_info_t *h, void __user *argp)
{
	DriverVer_type DriverVer = DRIVER_VERSION;

	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}

static int cciss_getluninfo(ctlr_info_t *h,
	struct gendisk *disk, void __user *argp)
{
	LogvolInfo_struct luninfo;
	drive_info_struct *drv = get_drv(disk);

	if (!argp)
		return -EINVAL;
	memcpy(&luninfo.LunID, drv->LunID, sizeof(luninfo.LunID));
	luninfo.num_opens = drv->usage_count;
	luninfo.num_parts = 0;
	if (copy_to_user(argp, &luninfo, sizeof(LogvolInfo_struct)))
		return -EFAULT;
	return 0;
}

static int cciss_passthru(ctlr_info_t *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;
	CommandList_struct *c;
	char *buff = NULL;
	u64bit temp64;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (!argp)
		return -EINVAL;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (copy_from_user(&iocommand, argp, sizeof(IOCTL_Command_struct)))
		return -EFAULT;
	if ((iocommand.buf_size < 1) &&
	    (iocommand.Request.Type.Direction != XFER_NONE)) {
		return -EINVAL;
	}
	if (iocommand.buf_size > 0) {
		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -ENOMEM;
	}
	if (iocommand.Request.Type.Direction == XFER_WRITE) {
		/* Copy the data into the buffer we created */
		if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
			kfree(buff);
			return -EFAULT;
		}
	} else {
		memset(buff, 0, iocommand.buf_size);
	}
	c = cmd_special_alloc(h);
	if (!c) {
		kfree(buff);
		return -ENOMEM;
	}
	/* Fill in the command type */
	c->cmd_type = CMD_IOCTL_PEND;
	/* Fill in Command Header */
	c->Header.ReplyQueue = 0;   /* unused in simple mode */
	if (iocommand.buf_size > 0) {	/* buffer to fill */
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else {	/* no buffers to fill */
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	c->Header.LUN = iocommand.LUN_info;
	/* use the kernel address of the cmd block for tag */
	c->Header.Tag.lower = c->busaddr;

	/* Fill in Request block */
	c->Request = iocommand.Request;

	/* Fill in the scatter gather information */
	if (iocommand.buf_size > 0) {
		temp64.val = pci_map_single(h->pdev, buff,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		c->SG[0].Addr.lower = temp64.val32.lower;
		c->SG[0].Addr.upper = temp64.val32.upper;
		c->SG[0].Len = iocommand.buf_size;
		c->SG[0].Ext = 0;	/* we are not chaining */
	}
	c->waiting = &wait;

	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);

	/* unlock the buffers from DMA */
	temp64.val32.lower = c->SG[0].Addr.lower;
	temp64.val32.upper = c->SG[0].Addr.upper;
	pci_unmap_single(h->pdev, (dma_addr_t) temp64.val, iocommand.buf_size,
			 PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);

	/* Copy the error information out */
	iocommand.error_info = *(c->err_info);
	if (copy_to_user(argp, &iocommand, sizeof(IOCTL_Command_struct))) {
		kfree(buff);
		cmd_special_free(h, c);
		return -EFAULT;
	}

	if (iocommand.Request.Type.Direction == XFER_READ) {
		/* Copy the data out of the buffer we created */
		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
			kfree(buff);
			cmd_special_free(h, c);
			return -EFAULT;
		}
	}
	kfree(buff);
	cmd_special_free(h, c);
	return 0;
}

static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp)
{
	BIG_IOCTL_Command_struct *ioc;
	CommandList_struct *c;
	unsigned char **buff = NULL;
	int *buff_size = NULL;
	u64bit temp64;
	BYTE sg_used = 0;
	int status = 0;
	int i;
	DECLARE_COMPLETION_ONSTACK(wait);
	__u32 left;
	__u32 sz;
	BYTE __user *data_ptr;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc) {
		status = -ENOMEM;
		goto cleanup1;
	}
	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup1;
	}
	if ((ioc->buf_size < 1) &&
	    (ioc->Request.Type.Direction != XFER_NONE)) {
		status = -EINVAL;
		goto cleanup1;
	}
	/* Check kmalloc limits using all SGs */
	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
		status = -EINVAL;
		goto cleanup1;
	}
	if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
		status = -EINVAL;
		goto cleanup1;
	}
	buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
	if (!buff) {
		status = -ENOMEM;
		goto cleanup1;
	}
	buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
	if (!buff_size) {
		status = -ENOMEM;
		goto cleanup1;
	}
	left = ioc->buf_size;
	data_ptr = ioc->buf;
	while (left) {
		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction == XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
				goto cleanup1;
			}
		} else {
			memset(buff[sg_used], 0, sz);
		}
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_special_alloc(h);
	if (!c) {
		status = -ENOMEM;
		goto cleanup1;
	}
	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = sg_used;
	c->Header.SGTotal = sg_used;
	c->Header.LUN = ioc->LUN_info;
	c->Header.Tag.lower = c->busaddr;

	c->Request = ioc->Request;
	for (i = 0; i < sg_used; i++) {
		temp64.val = pci_map_single(h->pdev, buff[i], buff_size[i],
				    PCI_DMA_BIDIRECTIONAL);
		c->SG[i].Addr.lower = temp64.val32.lower;
		c->SG[i].Addr.upper = temp64.val32.upper;
		c->SG[i].Len = buff_size[i];
		c->SG[i].Ext = 0;	/* we are not chaining */
	}
	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
	/* unlock the buffers from DMA */
	for (i = 0; i < sg_used; i++) {
		temp64.val32.lower = c->SG[i].Addr.lower;
		temp64.val32.upper = c->SG[i].Addr.upper;
		pci_unmap_single(h->pdev,
			(dma_addr_t) temp64.val, buff_size[i],
			PCI_DMA_BIDIRECTIONAL);
	}
	check_ioctl_unit_attention(h, c);
	/* Copy the error information out */
	ioc->error_info = *(c->err_info);
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		cmd_special_free(h, c);
		status = -EFAULT;
		goto cleanup1;
	}
	if (ioc->Request.Type.Direction == XFER_READ) {
		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;
		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				cmd_special_free(h, c);
				status = -EFAULT;
				goto cleanup1;
			}
			ptr += buff_size[i];
		}
	}
	cmd_special_free(h, c);
	status = 0;
cleanup1:
	if (buff) {
		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}
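/*
 * Illustrative note (not part of the original source): CCISS_BIG_PASSTHRU
 * splits the user buffer into kmalloc-sized chunks, one per SG entry.
 * With ioc->malloc_size == 4096 and ioc->buf_size == 10000, the loop
 * above allocates three buffers of 4096, 4096 and 1808 bytes and maps
 * each one as its own scatter-gather element (sg_used == 3).  The
 * buf_size <= malloc_size * MAXSGENTRIES check guarantees the chunks
 * always fit in the command's SG list.
 */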
static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;
	ctlr_info_t *h = get_host(disk);
	void __user *argp = (void __user *)arg;

	dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n",
		cmd, arg);
	switch (cmd) {
	case CCISS_GETPCIINFO:
		return cciss_getpciinfo(h, argp);
	case CCISS_GETINTINFO:
		return cciss_getintinfo(h, argp);
	case CCISS_SETINTINFO:
		return cciss_setintinfo(h, argp);
	case CCISS_GETNODENAME:
		return cciss_getnodename(h, argp);
	case CCISS_SETNODENAME:
		return cciss_setnodename(h, argp);
	case CCISS_GETHEARTBEAT:
		return cciss_getheartbeat(h, argp);
	case CCISS_GETBUSTYPES:
		return cciss_getbustypes(h, argp);
	case CCISS_GETFIRMVER:
		return cciss_getfirmver(h, argp);
	case CCISS_GETDRIVVER:
		return cciss_getdrivver(h, argp);
	case CCISS_DEREGDISK:
	case CCISS_REGNEWD:
	case CCISS_REVALIDVOLS:
		return rebuild_lun_table(h, 0, 1);
	case CCISS_GETLUNINFO:
		return cciss_getluninfo(h, disk, argp);
	case CCISS_PASSTHRU:
		return cciss_passthru(h, argp);
	case CCISS_BIG_PASSTHRU:
		return cciss_bigpassthru(h, argp);

	/* scsi_cmd_blk_ioctl handles these, below, though some are not */
	/* very meaningful for cciss.  SG_IO is the main one people want. */

	case SG_GET_VERSION_NUM:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SG_IO:
	case SCSI_IOCTL_SEND_COMMAND:
		return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);

	/* scsi_cmd_blk_ioctl would normally handle these, below, but */
	/* they aren't a good fit for cciss, as CD-ROMs are */
	/* not supported, and we don't have any bus/target/lun */
	/* which we present to the kernel. */

	case CDROM_SEND_PACKET:
	case CDROMCLOSETRAY:
	case CDROMEJECT:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	default:
		return -ENOTTY;
	}
}

static void cciss_check_queues(ctlr_info_t *h)
{
	int start_queue = h->next_to_run;
	int i;

	/* check to see if we have maxed out the number of commands that can
	 * be placed on the queue.  If so then exit.  We do this check here
	 * in case the interrupt we serviced was from an ioctl and did not
	 * free any new commands.
	 */
	if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
		return;

	/* We have room on the queue for more commands.  Now we need to queue
	 * them up.  We will also keep track of the next queue to run so
	 * that every queue gets a chance to be started first.
	 */
	for (i = 0; i < h->highest_lun + 1; i++) {
		int curr_queue = (start_queue + i) % (h->highest_lun + 1);
		/* make sure the disk has been added and the drive is real
		 * because this can be called from the middle of init_one.
		 */
		if (!h->drv[curr_queue])
			continue;
		if (!(h->drv[curr_queue]->queue) ||
			!(h->drv[curr_queue]->heads))
			continue;
		blk_start_queue(h->gendisk[curr_queue]->queue);

		/* check to see if we have maxed out the number of commands
		 * that can be placed on the queue.
		 */
		if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
			if (curr_queue == start_queue) {
				h->next_to_run =
				    (start_queue + 1) % (h->highest_lun + 1);
				break;
			} else {
				h->next_to_run = curr_queue;
				break;
			}
		}
	}
}
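/*
 * Illustrative note (not part of the original source): cciss_check_queues()
 * restarts the per-drive request queues round-robin, beginning at
 * h->next_to_run so no drive is starved.  With highest_lun == 3 and
 * next_to_run == 2, queues are tried in the order 2, 3, 0, 1; if the
 * command pool runs dry while starting drive 0, next_to_run is set to 0
 * so the scan resumes there after the next completion.
 */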
1797 */ 1798 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) { 1799 if (curr_queue == start_queue) { 1800 h->next_to_run = 1801 (start_queue + 1) % (h->highest_lun + 1); 1802 break; 1803 } else { 1804 h->next_to_run = curr_queue; 1805 break; 1806 } 1807 } 1808 } 1809} 1810 1811static void cciss_softirq_done(struct request *rq) 1812{ 1813 CommandList_struct *c = rq->completion_data; 1814 ctlr_info_t *h = hba[c->ctlr]; 1815 SGDescriptor_struct *curr_sg = c->SG; 1816 u64bit temp64; 1817 unsigned long flags; 1818 int i, ddir; 1819 int sg_index = 0; 1820 1821 if (c->Request.Type.Direction == XFER_READ) 1822 ddir = PCI_DMA_FROMDEVICE; 1823 else 1824 ddir = PCI_DMA_TODEVICE; 1825 1826 /* command did not need to be retried */ 1827 /* unmap the DMA mapping for all the scatter gather elements */ 1828 for (i = 0; i < c->Header.SGList; i++) { 1829 if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) { 1830 cciss_unmap_sg_chain_block(h, c); 1831 /* Point to the next block */ 1832 curr_sg = h->cmd_sg_list[c->cmdindex]; 1833 sg_index = 0; 1834 } 1835 temp64.val32.lower = curr_sg[sg_index].Addr.lower; 1836 temp64.val32.upper = curr_sg[sg_index].Addr.upper; 1837 pci_unmap_page(h->pdev, temp64.val, curr_sg[sg_index].Len, 1838 ddir); 1839 ++sg_index; 1840 } 1841 1842 dev_dbg(&h->pdev->dev, "Done with %p\n", rq); 1843 1844 /* set the residual count for pc requests */ 1845 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) 1846 rq->resid_len = c->err_info->ResidualCnt; 1847 1848 blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO); 1849 1850 spin_lock_irqsave(&h->lock, flags); 1851 cmd_free(h, c); 1852 cciss_check_queues(h); 1853 spin_unlock_irqrestore(&h->lock, flags); 1854} 1855 1856static inline void log_unit_to_scsi3addr(ctlr_info_t *h, 1857 unsigned char scsi3addr[], uint32_t log_unit) 1858{ 1859 memcpy(scsi3addr, h->drv[log_unit]->LunID, 1860 sizeof(h->drv[log_unit]->LunID)); 1861} 1862 1863/* This function gets the SCSI vendor, model, and revision of a logical drive 1864 * via the inquiry page 0. Model, vendor, and rev are set to empty strings if 1865 * they cannot be read. 1866 */ 1867static void cciss_get_device_descr(ctlr_info_t *h, int logvol, 1868 char *vendor, char *model, char *rev) 1869{ 1870 int rc; 1871 InquiryData_struct *inq_buf; 1872 unsigned char scsi3addr[8]; 1873 1874 *vendor = '\0'; 1875 *model = '\0'; 1876 *rev = '\0'; 1877 1878 inq_buf = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); 1879 if (!inq_buf) 1880 return; 1881 1882 log_unit_to_scsi3addr(h, scsi3addr, logvol); 1883 rc = sendcmd_withirq(h, CISS_INQUIRY, inq_buf, sizeof(*inq_buf), 0, 1884 scsi3addr, TYPE_CMD); 1885 if (rc == IO_OK) { 1886 memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN); 1887 vendor[VENDOR_LEN] = '\0'; 1888 memcpy(model, &inq_buf->data_byte[16], MODEL_LEN); 1889 model[MODEL_LEN] = '\0'; 1890 memcpy(rev, &inq_buf->data_byte[32], REV_LEN); 1891 rev[REV_LEN] = '\0'; 1892 } 1893 1894 kfree(inq_buf); 1895 return; 1896} 1897 1898/* This function gets the serial number of a logical drive via 1899 * inquiry page 0x83. Serial no. is 16 bytes. If the serial 1900 * number cannot be had, for whatever reason, 16 bytes of 0xff 1901 * are returned instead. 
1902 */ 1903static void cciss_get_serial_no(ctlr_info_t *h, int logvol, 1904 unsigned char *serial_no, int buflen) 1905{ 1906#define PAGE_83_INQ_BYTES 64 1907 int rc; 1908 unsigned char *buf; 1909 unsigned char scsi3addr[8]; 1910 1911 if (buflen > 16) 1912 buflen = 16; 1913 memset(serial_no, 0xff, buflen); 1914 buf = kzalloc(PAGE_83_INQ_BYTES, GFP_KERNEL); 1915 if (!buf) 1916 return; 1917 memset(serial_no, 0, buflen); 1918 log_unit_to_scsi3addr(h, scsi3addr, logvol); 1919 rc = sendcmd_withirq(h, CISS_INQUIRY, buf, 1920 PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD); 1921 if (rc == IO_OK) 1922 memcpy(serial_no, &buf[8], buflen); 1923 kfree(buf); 1924 return; 1925} 1926 1927/* 1928 * cciss_add_disk sets up the block device queue for a logical drive 1929 */ 1930static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, 1931 int drv_index) 1932{ 1933 disk->queue = blk_init_queue(do_cciss_request, &h->lock); 1934 if (!disk->queue) 1935 goto init_queue_failure; 1936 sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index); 1937 disk->major = h->major; 1938 disk->first_minor = drv_index << NWD_SHIFT; 1939 disk->fops = &cciss_fops; 1940 if (cciss_create_ld_sysfs_entry(h, drv_index)) 1941 goto cleanup_queue; 1942 disk->private_data = h->drv[drv_index]; 1943 disk->driverfs_dev = &h->drv[drv_index]->dev; 1944 1945 /* Set up queue information */ 1946 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); 1947 1948 /* This is a hardware imposed limit. */ 1949 blk_queue_max_segments(disk->queue, h->maxsgentries); 1950 1951 blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors); 1952 1953 blk_queue_softirq_done(disk->queue, cciss_softirq_done); 1954 1955 disk->queue->queuedata = h; 1956 1957 blk_queue_logical_block_size(disk->queue, 1958 h->drv[drv_index]->block_size); 1959 1960 /* Make sure all queue data is written out before */ 1961 /* setting h->drv[drv_index]->queue, as setting this */ 1962 /* allows the interrupt handler to start the queue */ 1963 wmb(); 1964 h->drv[drv_index]->queue = disk->queue; 1965 add_disk(disk); 1966 return 0; 1967 1968cleanup_queue: 1969 blk_cleanup_queue(disk->queue); 1970 disk->queue = NULL; 1971init_queue_failure: 1972 return -1; 1973} 1974 1975/* This function will check the usage_count of the drive to be updated/added. 1976 * If the usage_count is zero and it is a heretofore unknown drive, or, 1977 * the drive's capacity, geometry, or serial number has changed, 1978 * then the drive information will be updated and the disk will be 1979 * re-registered with the kernel. If these conditions don't hold, 1980 * then it will be left alone for the next reboot. The exception to this 1981 * is disk 0 which will always be left registered with the kernel since it 1982 * is also the controller node. Any changes to disk 0 will show up on 1983 * the next reboot. 
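 * (For this purpose a drive counts as "changed" when its 16-byte
 * serial number, block size, block count, or geometry differs from
 * what the driver last recorded, as compared below.)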
 */
static void cciss_update_drive_info(ctlr_info_t *h, int drv_index,
	int first_time, int via_ioctl)
{
	struct gendisk *disk;
	InquiryData_struct *inq_buff = NULL;
	unsigned int block_size;
	sector_t total_size;
	unsigned long flags = 0;
	int ret = 0;
	drive_info_struct *drvinfo;

	/* Get information about the disk and modify the driver structure */
	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
	drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL);
	if (inq_buff == NULL || drvinfo == NULL)
		goto mem_msg;

	/* testing to see if 16-byte CDBs are already being used */
	if (h->cciss_read == CCISS_READ_16) {
		cciss_read_capacity_16(h, drv_index,
			&total_size, &block_size);

	} else {
		cciss_read_capacity(h, drv_index, &total_size, &block_size);
		/* if read_capacity returns all F's this volume is >2TB */
		/* in size so we switch to 16-byte CDBs for all */
		/* read/write ops */
		if (total_size == 0xFFFFFFFFULL) {
			cciss_read_capacity_16(h, drv_index,
				&total_size, &block_size);
			h->cciss_read = CCISS_READ_16;
			h->cciss_write = CCISS_WRITE_16;
		} else {
			h->cciss_read = CCISS_READ_10;
			h->cciss_write = CCISS_WRITE_10;
		}
	}

	cciss_geometry_inquiry(h, drv_index, total_size, block_size,
			       inq_buff, drvinfo);
	drvinfo->block_size = block_size;
	drvinfo->nr_blocks = total_size + 1;

	cciss_get_device_descr(h, drv_index, drvinfo->vendor,
				drvinfo->model, drvinfo->rev);
	cciss_get_serial_no(h, drv_index, drvinfo->serial_no,
			sizeof(drvinfo->serial_no));
	/* Save the lunid in case we deregister the disk, below. */
	memcpy(drvinfo->LunID, h->drv[drv_index]->LunID,
		sizeof(drvinfo->LunID));

	/* Is it the same disk we already know, and nothing's changed? */
	if (h->drv[drv_index]->raid_level != -1 &&
		((memcmp(drvinfo->serial_no,
				h->drv[drv_index]->serial_no, 16) == 0) &&
		drvinfo->block_size == h->drv[drv_index]->block_size &&
		drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks &&
		drvinfo->heads == h->drv[drv_index]->heads &&
		drvinfo->sectors == h->drv[drv_index]->sectors &&
		drvinfo->cylinders == h->drv[drv_index]->cylinders))
			/* The disk is unchanged, nothing to update */
			goto freeret;

	/* If we get here it's not the same disk, or something's changed,
	 * so we need to deregister it, and re-register it, if it's not
	 * in use.
	 * If the disk already exists then deregister it before proceeding
	 * (unless it's the first disk, i.e. the controller node).
	 */
	if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) {
		dev_warn(&h->pdev->dev, "disk %d has changed.\n", drv_index);
		spin_lock_irqsave(&h->lock, flags);
		h->drv[drv_index]->busy_configuring = 1;
		spin_unlock_irqrestore(&h->lock, flags);

		/* deregister_disk sets h->drv[drv_index]->queue = NULL
		 * which keeps the interrupt handler from starting
		 * the queue.
		 */
		ret = deregister_disk(h, drv_index, 0, via_ioctl);
	}

	/* If the disk is in use return */
	if (ret)
		goto freeret;

	/* Save the new information from cciss_geometry_inquiry
	 * and serial number inquiry.  If the disk was deregistered
	 * above, then h->drv[drv_index] will be NULL.
	 */
	if (h->drv[drv_index] == NULL) {
		drvinfo->device_initialized = 0;
		h->drv[drv_index] = drvinfo;
		drvinfo = NULL; /* so it won't be freed below. */
	} else {
		/* special case for cxd0 */
		h->drv[drv_index]->block_size = drvinfo->block_size;
		h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks;
		h->drv[drv_index]->heads = drvinfo->heads;
		h->drv[drv_index]->sectors = drvinfo->sectors;
		h->drv[drv_index]->cylinders = drvinfo->cylinders;
		h->drv[drv_index]->raid_level = drvinfo->raid_level;
		memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16);
		memcpy(h->drv[drv_index]->vendor, drvinfo->vendor,
			VENDOR_LEN + 1);
		memcpy(h->drv[drv_index]->model, drvinfo->model, MODEL_LEN + 1);
		memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1);
	}

	++h->num_luns;
	disk = h->gendisk[drv_index];
	set_capacity(disk, h->drv[drv_index]->nr_blocks);

	/* If it's not disk 0 (drv_index != 0)
	 * or if it was disk 0, but there was previously
	 * no actual corresponding configured logical drive
	 * (raid_level == -1) then we want to update the
	 * logical drive's information.
	 */
	if (drv_index || first_time) {
		if (cciss_add_disk(h, disk, drv_index) != 0) {
			cciss_free_gendisk(h, drv_index);
			cciss_free_drive_info(h, drv_index);
			dev_warn(&h->pdev->dev, "could not update disk %d\n",
				drv_index);
			--h->num_luns;
		}
	}

freeret:
	kfree(inq_buff);
	kfree(drvinfo);
	return;
mem_msg:
	dev_err(&h->pdev->dev, "out of memory\n");
	goto freeret;
}

/* This function will find the first index of the controller's drive array
 * that has a null drv pointer, allocate the drive info struct, and
 * return that index.  This is where new drives will be added.
 * If the index to be returned is greater than the highest_lun index for
 * the controller then highest_lun is set to this new index.
 * If there are no available indexes, or if the allocation fails, then -1
 * is returned.  "controller_node" is used to know if this is a real
 * logical drive, or just the controller node, which determines if this
 * counts towards highest_lun.
 */
static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node)
{
	int i;
	drive_info_struct *drv;

	/* Search for an empty slot for our drive info */
	for (i = 0; i < CISS_MAX_LUN; i++) {

		/* if not cxd0 case, and it's occupied, skip it. */
		if (h->drv[i] && i != 0)
			continue;
		/*
		 * If it's cxd0 case, and drv is alloc'ed already, and a
		 * disk is configured there, skip it.
		 */
		if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1)
			continue;

		/*
		 * We've found an empty slot.  Update highest_lun
		 * provided this isn't just the fake cxd0 controller node.
		 */
		if (i > h->highest_lun && !controller_node)
			h->highest_lun = i;

		/* If adding a real disk at cxd0, and it's already alloc'ed */
		if (i == 0 && h->drv[i] != NULL)
			return i;

		/*
		 * Found an empty slot, not already alloc'ed.  Allocate it.
		 * Mark it with raid_level == -1, so we know it's new later on.
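		 * (The raid_level == -1 sentinel is the same marker
		 * cciss_update_drive_info uses to tell a brand-new slot
		 * from an already-configured logical drive.)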
		 */
		drv = kzalloc(sizeof(*drv), GFP_KERNEL);
		if (!drv)
			return -1;
		drv->raid_level = -1;	/* so we know it's new */
		h->drv[i] = drv;
		return i;
	}
	return -1;
}

static void cciss_free_drive_info(ctlr_info_t *h, int drv_index)
{
	kfree(h->drv[drv_index]);
	h->drv[drv_index] = NULL;
}

static void cciss_free_gendisk(ctlr_info_t *h, int drv_index)
{
	put_disk(h->gendisk[drv_index]);
	h->gendisk[drv_index] = NULL;
}

/* cciss_add_gendisk finds a free hba[]->drv structure
 * and allocates a gendisk if needed, and sets the lunid
 * in the drvinfo structure.  It returns the index into
 * the ->drv[] array, or -1 if none are free.
 * controller_node indicates whether highest_lun should
 * count this disk, or if it's only being added to provide
 * a means to talk to the controller in case no logical
 * drives have yet been configured.
 */
static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[],
	int controller_node)
{
	int drv_index;

	drv_index = cciss_alloc_drive_info(h, controller_node);
	if (drv_index == -1)
		return -1;

	/* Check if the gendisk needs to be allocated */
	if (!h->gendisk[drv_index]) {
		h->gendisk[drv_index] =
			alloc_disk(1 << NWD_SHIFT);
		if (!h->gendisk[drv_index]) {
			dev_err(&h->pdev->dev,
				"could not allocate a new disk %d\n",
				drv_index);
			goto err_free_drive_info;
		}
	}
	memcpy(h->drv[drv_index]->LunID, lunid,
		sizeof(h->drv[drv_index]->LunID));
	if (cciss_create_ld_sysfs_entry(h, drv_index))
		goto err_free_disk;
	/* Don't need to mark this busy because nobody
	 * else knows about this disk yet to contend
	 * for access to it.
	 */
	h->drv[drv_index]->busy_configuring = 0;
	wmb();
	return drv_index;

err_free_disk:
	cciss_free_gendisk(h, drv_index);
err_free_drive_info:
	cciss_free_drive_info(h, drv_index);
	return -1;
}

/* This is for the special case of a controller which
 * has no logical drives.  In this case, we still need
 * to register a disk so the controller can be accessed
 * by the Array Config Utility.
 */
static void cciss_add_controller_node(ctlr_info_t *h)
{
	struct gendisk *disk;
	int drv_index;

	if (h->gendisk[0] != NULL)	/* already did this?  Then bail. */
		return;

	drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1);
	if (drv_index == -1)
		goto error;
	h->drv[drv_index]->block_size = 512;
	h->drv[drv_index]->nr_blocks = 0;
	h->drv[drv_index]->heads = 0;
	h->drv[drv_index]->sectors = 0;
	h->drv[drv_index]->cylinders = 0;
	h->drv[drv_index]->raid_level = -1;
	memset(h->drv[drv_index]->serial_no, 0, 16);
	disk = h->gendisk[drv_index];
	if (cciss_add_disk(h, disk, drv_index) == 0)
		return;
	cciss_free_gendisk(h, drv_index);
	cciss_free_drive_info(h, drv_index);
error:
	dev_warn(&h->pdev->dev, "could not add disk 0.\n");
	return;
}

/* This function will add and remove logical drives from the Logical
 * drive array of the controller and maintain persistence of ordering
 * so that mount points are preserved until the next reboot.  This allows
 * for the removal of logical drives in the middle of the drive array
 * without a re-ordering of those drives.
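 * Drives are matched between the controller's view and the driver's
 * view by their 8-byte LunID rather than by position in the array.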
2273 * INPUT 2274 * h = The controller to perform the operations on 2275 */ 2276static int rebuild_lun_table(ctlr_info_t *h, int first_time, 2277 int via_ioctl) 2278{ 2279 int num_luns; 2280 ReportLunData_struct *ld_buff = NULL; 2281 int return_code; 2282 int listlength = 0; 2283 int i; 2284 int drv_found; 2285 int drv_index = 0; 2286 unsigned char lunid[8] = CTLR_LUNID; 2287 unsigned long flags; 2288 2289 if (!capable(CAP_SYS_RAWIO)) 2290 return -EPERM; 2291 2292 /* Set busy_configuring flag for this operation */ 2293 spin_lock_irqsave(&h->lock, flags); 2294 if (h->busy_configuring) { 2295 spin_unlock_irqrestore(&h->lock, flags); 2296 return -EBUSY; 2297 } 2298 h->busy_configuring = 1; 2299 spin_unlock_irqrestore(&h->lock, flags); 2300 2301 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL); 2302 if (ld_buff == NULL) 2303 goto mem_msg; 2304 2305 return_code = sendcmd_withirq(h, CISS_REPORT_LOG, ld_buff, 2306 sizeof(ReportLunData_struct), 2307 0, CTLR_LUNID, TYPE_CMD); 2308 2309 if (return_code == IO_OK) 2310 listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength); 2311 else { /* reading number of logical volumes failed */ 2312 dev_warn(&h->pdev->dev, 2313 "report logical volume command failed\n"); 2314 listlength = 0; 2315 goto freeret; 2316 } 2317 2318 num_luns = listlength / 8; /* 8 bytes per entry */ 2319 if (num_luns > CISS_MAX_LUN) { 2320 num_luns = CISS_MAX_LUN; 2321 dev_warn(&h->pdev->dev, "more luns configured" 2322 " on controller than can be handled by" 2323 " this driver.\n"); 2324 } 2325 2326 if (num_luns == 0) 2327 cciss_add_controller_node(h); 2328 2329 /* Compare controller drive array to driver's drive array 2330 * to see if any drives are missing on the controller due 2331 * to action of Array Config Utility (user deletes drive) 2332 * and deregister logical drives which have disappeared. 2333 */ 2334 for (i = 0; i <= h->highest_lun; i++) { 2335 int j; 2336 drv_found = 0; 2337 2338 /* skip holes in the array from already deleted drives */ 2339 if (h->drv[i] == NULL) 2340 continue; 2341 2342 for (j = 0; j < num_luns; j++) { 2343 memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid)); 2344 if (memcmp(h->drv[i]->LunID, lunid, 2345 sizeof(lunid)) == 0) { 2346 drv_found = 1; 2347 break; 2348 } 2349 } 2350 if (!drv_found) { 2351 /* Deregister it from the OS, it's gone. */ 2352 spin_lock_irqsave(&h->lock, flags); 2353 h->drv[i]->busy_configuring = 1; 2354 spin_unlock_irqrestore(&h->lock, flags); 2355 return_code = deregister_disk(h, i, 1, via_ioctl); 2356 if (h->drv[i] != NULL) 2357 h->drv[i]->busy_configuring = 0; 2358 } 2359 } 2360 2361 /* Compare controller drive array to driver's drive array. 2362 * Check for updates in the drive information and any new drives 2363 * on the controller due to ACU adding logical drives, or changing 2364 * a logical drive's size, etc. Reregister any new/changed drives 2365 */ 2366 for (i = 0; i < num_luns; i++) { 2367 int j; 2368 2369 drv_found = 0; 2370 2371 memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid)); 2372 /* Find if the LUN is already in the drive array 2373 * of the driver. If so then update its info 2374 * if not in use. If it does not exist then find 2375 * the first free index and add it. 
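		 * (The comparison below is on the full 8-byte LunID, so
		 * holes left by deleted drives keep their slots and drive
		 * ordering is preserved.)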
		 */
		for (j = 0; j <= h->highest_lun; j++) {
			if (h->drv[j] != NULL &&
				memcmp(h->drv[j]->LunID, lunid,
					sizeof(h->drv[j]->LunID)) == 0) {
				drv_index = j;
				drv_found = 1;
				break;
			}
		}

		/* check if the drive was found already in the array */
		if (!drv_found) {
			drv_index = cciss_add_gendisk(h, lunid, 0);
			if (drv_index == -1)
				goto freeret;
		}
		cciss_update_drive_info(h, drv_index, first_time, via_ioctl);
	}		/* end for */

freeret:
	kfree(ld_buff);
	h->busy_configuring = 0;
	/* We return -1 here to tell the ACU that we have registered/updated
	 * all of the drives that we can and to keep it from calling us
	 * additional times.
	 */
	return -1;
mem_msg:
	dev_err(&h->pdev->dev, "out of memory\n");
	h->busy_configuring = 0;
	goto freeret;
}

static void cciss_clear_drive_info(drive_info_struct *drive_info)
{
	/* zero out the disk size info */
	drive_info->nr_blocks = 0;
	drive_info->block_size = 0;
	drive_info->heads = 0;
	drive_info->sectors = 0;
	drive_info->cylinders = 0;
	drive_info->raid_level = -1;
	memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no));
	memset(drive_info->model, 0, sizeof(drive_info->model));
	memset(drive_info->rev, 0, sizeof(drive_info->rev));
	memset(drive_info->vendor, 0, sizeof(drive_info->vendor));
	/*
	 * don't clear the LUNID though, we need to remember which
	 * one this one is.
	 */
}

/* This function will deregister the disk and its queue from the
 * kernel.  It must be called with the controller lock held and the
 * drv structure's busy_configuring flag set.  Its parameters are:
 *
 * disk = This is the disk to be deregistered
 * drv = This is the drive_info_struct associated with the disk to be
 *       deregistered.  It contains information about the disk used
 *       by the driver.
 * clear_all = This flag determines whether or not the disk information
 *             is going to be completely cleared out and the highest_lun
 *             reset.  Sometimes we want to clear out information about
 *             the disk in preparation for re-adding it.  In this case
 *             the highest_lun should be left unchanged and the LunID
 *             should not be cleared.
 * via_ioctl
 *    This indicates whether we've reached this path via ioctl.
 *    This affects the maximum usage count allowed for c0d0 to be messed with.
 *    If this path is reached via ioctl(), then the max_usage_count will
 *    be 1, as the process calling ioctl() has got to have the device open.
 *    If we get here via sysfs, then the max usage count will be zero.
 */
static int deregister_disk(ctlr_info_t *h, int drv_index,
			   int clear_all, int via_ioctl)
{
	int i;
	struct gendisk *disk;
	drive_info_struct *drv;
	int recalculate_highest_lun;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	drv = h->drv[drv_index];
	disk = h->gendisk[drv_index];

	/* make sure logical volume is NOT in use */
	if (clear_all || (h->gendisk[0] == disk)) {
		if (drv->usage_count > via_ioctl)
			return -EBUSY;
	} else if (drv->usage_count > 0)
		return -EBUSY;

	recalculate_highest_lun = (drv == h->drv[h->highest_lun]);

	/* invalidate the devices and deregister the disk.  If it is disk
	 * zero do not deregister it but just zero out its values.  This
	 * allows us to delete disk zero but keep the controller registered.
	 */
	if (h->gendisk[0] != disk) {
		struct request_queue *q = disk->queue;
		if (disk->flags & GENHD_FL_UP) {
			cciss_destroy_ld_sysfs_entry(h, drv_index, 0);
			del_gendisk(disk);
		}
		if (q)
			blk_cleanup_queue(q);
		/* If clear_all is set then we are deleting the logical
		 * drive, not just refreshing its info.  For drives
		 * other than disk 0 we will call put_disk.  We do not
		 * do this for disk 0 as we need it to be able to
		 * configure the controller.
		 */
		if (clear_all) {
			/* This isn't pretty, but we need to find the
			 * disk in our array and NULL out the pointer.
			 * This is so that we will call alloc_disk if
			 * this index is used again later.
			 */
			for (i = 0; i < CISS_MAX_LUN; i++) {
				if (h->gendisk[i] == disk) {
					h->gendisk[i] = NULL;
					break;
				}
			}
			put_disk(disk);
		}
	} else {
		set_capacity(disk, 0);
		cciss_clear_drive_info(drv);
	}

	--h->num_luns;

	/* if it was the last disk, find the new highest lun */
	if (clear_all && recalculate_highest_lun) {
		int newhighest = -1;
		for (i = 0; i <= h->highest_lun; i++) {
			/* if the disk has size > 0, it is available */
			if (h->drv[i] && h->drv[i]->heads)
				newhighest = i;
		}
		h->highest_lun = newhighest;
	}
	return 0;
}

static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
	size_t size, __u8 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	u64bit buff_dma_handle;
	int status = IO_OK;

	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	if (buff != NULL) {
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	c->Header.Tag.lower = c->busaddr;
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	c->Request.Type.Type = cmd_type;
	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case CISS_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code != 0) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = page_code;
			}
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = CISS_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case CISS_REPORT_LOG:
		case CISS_REPORT_PHYS:
			/* Talking to controller so it's a physical command
			   mode = 00 target = 0.  Nothing to write.
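			   The 4-byte allocation length is encoded
			   big-endian in CDB bytes 6 through 9 below.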
			 */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;

		case CCISS_READ_CAPACITY:
			c->Request.CDBLen = 10;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			break;
		case CCISS_READ_CAPACITY_16:
			c->Request.CDBLen = 16;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[1] = 0x10;
			c->Request.CDB[10] = (size >> 24) & 0xFF;
			c->Request.CDB[11] = (size >> 16) & 0xFF;
			c->Request.CDB[12] = (size >> 8) & 0xFF;
			c->Request.CDB[13] = size & 0xFF;
			break;
		case CCISS_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			c->Request.CDB[7] = (size >> 8) & 0xFF;
			c->Request.CDB[8] = size & 0xFF;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0;
			break;
		default:
			dev_warn(&h->pdev->dev, "Unknown Command 0x%02x\n", cmd);
			return IO_ERROR;
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {
		case CCISS_ABORT_MSG:
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;	/* abort */
			c->Request.CDB[1] = 0;	/* abort a command */
			/* buff contains the tag of the command to abort */
			memcpy(&c->Request.CDB[4], buff, 8);
			break;
		case CCISS_RESET_MSG:
			c->Request.CDBLen = 16;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0;
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = cmd;	/* reset */
			c->Request.CDB[1] = CCISS_RESET_TYPE_TARGET;
			break;
		case CCISS_NOOP_MSG:
			c->Request.CDBLen = 1;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			break;
		default:
			dev_warn(&h->pdev->dev,
				"unknown message type %d\n", cmd);
			return IO_ERROR;
		}
	} else {
		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
		return IO_ERROR;
	}
	/* Fill in the scatter gather information */
	if (size > 0) {
		buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
						buff, size,
						PCI_DMA_BIDIRECTIONAL);
		c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
		c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
		c->SG[0].Len = size;
		c->SG[0].Ext = 0;	/* we are not chaining */
	}
	return status;
}

static int __devinit cciss_send_reset(ctlr_info_t *h, unsigned char *scsi3addr,
	u8 reset_type)
{
	CommandList_struct *c;
	int return_status;

	c = cmd_alloc(h);
	if (!c)
		return -ENOMEM;
	return_status = fill_cmd(h, c, CCISS_RESET_MSG, NULL, 0, 0,
		CTLR_LUNID, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
	if (return_status != IO_OK) {
		cmd_special_free(h, c);
		return return_status;
	}
	c->waiting = NULL;
	enqueue_cmd_and_start_io(h, c);
	/* Don't wait for completion, the reset won't complete.  Don't free
	 * the command either.  This is the last command we will send before
	 * re-initializing everything, so it doesn't matter and won't leak.
	 */
	return 0;
}

static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
{
	switch (c->err_info->ScsiStatus) {
	case SAM_STAT_GOOD:
		return IO_OK;
	case SAM_STAT_CHECK_CONDITION:
		switch (0xf & c->err_info->SenseInfo[2]) {
		case 0: return IO_OK; /* no sense */
		case 1: return IO_OK; /* recovered error */
		default:
			if (check_for_unit_attention(h, c))
				return IO_NEEDS_RETRY;
			dev_warn(&h->pdev->dev, "cmd 0x%02x "
				"check condition, sense key = 0x%02x\n",
				c->Request.CDB[0], c->err_info->SenseInfo[2]);
		}
		break;
	default:
		dev_warn(&h->pdev->dev, "cmd 0x%02x has "
			"scsi status = 0x%02x\n",
			c->Request.CDB[0], c->err_info->ScsiStatus);
		break;
	}
	return IO_ERROR;
}

static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
{
	int return_status = IO_OK;

	if (c->err_info->CommandStatus == CMD_SUCCESS)
		return IO_OK;

	switch (c->err_info->CommandStatus) {
	case CMD_TARGET_STATUS:
		return_status = check_target_status(h, c);
		break;
	case CMD_DATA_UNDERRUN:
	case CMD_DATA_OVERRUN:
		/* expected for inquiry and report lun commands */
		break;
	case CMD_INVALID:
		dev_warn(&h->pdev->dev, "cmd 0x%02x is "
			"reported invalid\n", c->Request.CDB[0]);
		return_status = IO_ERROR;
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(&h->pdev->dev, "cmd 0x%02x has "
			"protocol error\n", c->Request.CDB[0]);
		return_status = IO_ERROR;
		break;
	case CMD_HARDWARE_ERR:
		dev_warn(&h->pdev->dev, "cmd 0x%02x had "
			"hardware error\n", c->Request.CDB[0]);
		return_status = IO_ERROR;
		break;
	case CMD_CONNECTION_LOST:
		dev_warn(&h->pdev->dev, "cmd 0x%02x had "
			"connection lost\n", c->Request.CDB[0]);
		return_status = IO_ERROR;
		break;
	case CMD_ABORTED:
		dev_warn(&h->pdev->dev, "cmd 0x%02x was "
			"aborted\n", c->Request.CDB[0]);
		return_status = IO_ERROR;
		break;
	case CMD_ABORT_FAILED:
		dev_warn(&h->pdev->dev, "cmd 0x%02x reports "
			"abort failed\n", c->Request.CDB[0]);
		return_status = IO_ERROR;
		break;
	case CMD_UNSOLICITED_ABORT:
		dev_warn(&h->pdev->dev, "unsolicited abort 0x%02x\n",
			c->Request.CDB[0]);
		return_status = IO_NEEDS_RETRY;
		break;
	case CMD_UNABORTABLE:
		dev_warn(&h->pdev->dev, "cmd unabortable\n");
		return_status = IO_ERROR;
		break;
	default:
		dev_warn(&h->pdev->dev, "cmd 0x%02x returned "
			"unknown status %x\n", c->Request.CDB[0],
			c->err_info->CommandStatus);
		return_status = IO_ERROR;
	}
	return return_status;
}

static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
	int attempt_retry)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	u64bit buff_dma_handle;
	int return_status = IO_OK;

resend_cmd2:
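	/* Issue the command and sleep until the interrupt handler
	 * completes it.  If the result is IO_NEEDS_RETRY (e.g. a unit
	 * attention), err_info is cleared and we jump back here, up to
	 * MAX_CMD_RETRIES times.
	 */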
c->waiting = &wait; 2790 enqueue_cmd_and_start_io(h, c); 2791 2792 wait_for_completion(&wait); 2793 2794 if (c->err_info->CommandStatus == 0 || !attempt_retry) 2795 goto command_done; 2796 2797 return_status = process_sendcmd_error(h, c); 2798 2799 if (return_status == IO_NEEDS_RETRY && 2800 c->retry_count < MAX_CMD_RETRIES) { 2801 dev_warn(&h->pdev->dev, "retrying 0x%02x\n", 2802 c->Request.CDB[0]); 2803 c->retry_count++; 2804 /* erase the old error information */ 2805 memset(c->err_info, 0, sizeof(ErrorInfo_struct)); 2806 return_status = IO_OK; 2807 INIT_COMPLETION(wait); 2808 goto resend_cmd2; 2809 } 2810 2811command_done: 2812 /* unlock the buffers from DMA */ 2813 buff_dma_handle.val32.lower = c->SG[0].Addr.lower; 2814 buff_dma_handle.val32.upper = c->SG[0].Addr.upper; 2815 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val, 2816 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL); 2817 return return_status; 2818} 2819 2820static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size, 2821 __u8 page_code, unsigned char scsi3addr[], 2822 int cmd_type) 2823{ 2824 CommandList_struct *c; 2825 int return_status; 2826 2827 c = cmd_special_alloc(h); 2828 if (!c) 2829 return -ENOMEM; 2830 return_status = fill_cmd(h, c, cmd, buff, size, page_code, 2831 scsi3addr, cmd_type); 2832 if (return_status == IO_OK) 2833 return_status = sendcmd_withirq_core(h, c, 1); 2834 2835 cmd_special_free(h, c); 2836 return return_status; 2837} 2838 2839static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol, 2840 sector_t total_size, 2841 unsigned int block_size, 2842 InquiryData_struct *inq_buff, 2843 drive_info_struct *drv) 2844{ 2845 int return_code; 2846 unsigned long t; 2847 unsigned char scsi3addr[8]; 2848 2849 memset(inq_buff, 0, sizeof(InquiryData_struct)); 2850 log_unit_to_scsi3addr(h, scsi3addr, logvol); 2851 return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff, 2852 sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD); 2853 if (return_code == IO_OK) { 2854 if (inq_buff->data_byte[8] == 0xFF) { 2855 dev_warn(&h->pdev->dev, 2856 "reading geometry failed, volume " 2857 "does not support reading geometry\n"); 2858 drv->heads = 255; 2859 drv->sectors = 32; /* Sectors per track */ 2860 drv->cylinders = total_size + 1; 2861 drv->raid_level = RAID_UNKNOWN; 2862 } else { 2863 drv->heads = inq_buff->data_byte[6]; 2864 drv->sectors = inq_buff->data_byte[7]; 2865 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8; 2866 drv->cylinders += inq_buff->data_byte[5]; 2867 drv->raid_level = inq_buff->data_byte[8]; 2868 } 2869 drv->block_size = block_size; 2870 drv->nr_blocks = total_size + 1; 2871 t = drv->heads * drv->sectors; 2872 if (t > 1) { 2873 sector_t real_size = total_size + 1; 2874 unsigned long rem = sector_div(real_size, t); 2875 if (rem) 2876 real_size++; 2877 drv->cylinders = real_size; 2878 } 2879 } else { /* Get geometry failed */ 2880 dev_warn(&h->pdev->dev, "reading geometry failed\n"); 2881 } 2882} 2883 2884static void 2885cciss_read_capacity(ctlr_info_t *h, int logvol, sector_t *total_size, 2886 unsigned int *block_size) 2887{ 2888 ReadCapdata_struct *buf; 2889 int return_code; 2890 unsigned char scsi3addr[8]; 2891 2892 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL); 2893 if (!buf) { 2894 dev_warn(&h->pdev->dev, "out of memory\n"); 2895 return; 2896 } 2897 2898 log_unit_to_scsi3addr(h, scsi3addr, logvol); 2899 return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY, buf, 2900 sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD); 2901 if (return_code == IO_OK) { 2902 *total_size = 
			be32_to_cpu(*(__be32 *) buf->total_size);
		*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
	} else {	/* read capacity command failed */
		dev_warn(&h->pdev->dev, "read capacity failed\n");
		*total_size = 0;
		*block_size = BLOCK_SIZE;
	}
	kfree(buf);
}

static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
	sector_t *total_size, unsigned int *block_size)
{
	ReadCapdata_struct_16 *buf;
	int return_code;
	unsigned char scsi3addr[8];

	buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
	if (!buf) {
		dev_warn(&h->pdev->dev, "out of memory\n");
		return;
	}

	log_unit_to_scsi3addr(h, scsi3addr, logvol);
	return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY_16,
		buf, sizeof(ReadCapdata_struct_16),
			0, scsi3addr, TYPE_CMD);
	if (return_code == IO_OK) {
		*total_size = be64_to_cpu(*(__be64 *) buf->total_size);
		*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
	} else {	/* read capacity command failed */
		dev_warn(&h->pdev->dev, "read capacity failed\n");
		*total_size = 0;
		*block_size = BLOCK_SIZE;
	}
	dev_info(&h->pdev->dev, "blocks=%llu block_size=%d\n",
		(unsigned long long)*total_size+1, *block_size);
	kfree(buf);
}

static int cciss_revalidate(struct gendisk *disk)
{
	ctlr_info_t *h = get_host(disk);
	drive_info_struct *drv = get_drv(disk);
	int logvol;
	int FOUND = 0;
	unsigned int block_size;
	sector_t total_size;
	InquiryData_struct *inq_buff = NULL;

	for (logvol = 0; logvol <= h->highest_lun; logvol++) {
		if (!h->drv[logvol])
			continue;
		if (memcmp(h->drv[logvol]->LunID, drv->LunID,
			sizeof(drv->LunID)) == 0) {
			FOUND = 1;
			break;
		}
	}

	if (!FOUND)
		return 1;

	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
	if (inq_buff == NULL) {
		dev_warn(&h->pdev->dev, "out of memory\n");
		return 1;
	}
	if (h->cciss_read == CCISS_READ_10) {
		cciss_read_capacity(h, logvol,
					&total_size, &block_size);
	} else {
		cciss_read_capacity_16(h, logvol,
					&total_size, &block_size);
	}
	cciss_geometry_inquiry(h, logvol, total_size, block_size,
			       inq_buff, drv);

	blk_queue_logical_block_size(drv->queue, drv->block_size);
	set_capacity(disk, drv->nr_blocks);

	kfree(inq_buff);
	return 0;
}

/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
	ulong page_offs = ((ulong) base) - page_base;
	void __iomem *page_remapped = ioremap(page_base, page_offs + size);

	return page_remapped ? (page_remapped + page_offs) : NULL;
}

/*
 * Takes jobs off the request Q and sends them to the hardware, then puts
 * them on the completion Q to wait for completion.
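 * Assumes the caller holds h->lock; submission stops early if the
 * controller reports its inbound command FIFO full.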
 */
static void start_io(ctlr_info_t *h)
{
	CommandList_struct *c;

	while (!list_empty(&h->reqQ)) {
		c = list_entry(h->reqQ.next, CommandList_struct, list);
		/* can't do anything if fifo is full */
		if ((h->access.fifo_full(h))) {
			dev_warn(&h->pdev->dev, "fifo full\n");
			break;
		}

		/* Get the first entry from the Request Q */
		removeQ(c);
		h->Qdepth--;

		/* Tell the controller to execute the command */
		h->access.submit_command(h, c);

		/* Put job onto the completed Q */
		addQ(&h->cmpQ, c);
	}
}

/* Assumes that h->lock is held. */
/* Zeros out the error record and then resends the command back */
/* to the controller */
static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
{
	/* erase the old error information */
	memset(c->err_info, 0, sizeof(ErrorInfo_struct));

	/* add it to software queue and then send it to the controller */
	addQ(&h->reqQ, c);
	h->Qdepth++;
	if (h->Qdepth > h->maxQsinceinit)
		h->maxQsinceinit = h->Qdepth;

	start_io(h);
}

static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
	unsigned int msg_byte, unsigned int host_byte,
	unsigned int driver_byte)
{
	/* inverse of macros in scsi.h */
	return (scsi_status_byte & 0xff) |
		((msg_byte & 0xff) << 8) |
		((host_byte & 0xff) << 16) |
		((driver_byte & 0xff) << 24);
}

static inline int evaluate_target_status(ctlr_info_t *h,
			CommandList_struct *cmd, int *retry_cmd)
{
	unsigned char sense_key;
	unsigned char status_byte, msg_byte, host_byte, driver_byte;
	int error_value;

	*retry_cmd = 0;
	/* If we get in here, it means we got "target status", that is, scsi status */
	status_byte = cmd->err_info->ScsiStatus;
	driver_byte = DRIVER_OK;
	msg_byte = cmd->err_info->CommandStatus; /* correct?  seems too device specific */

	if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC)
		host_byte = DID_PASSTHROUGH;
	else
		host_byte = DID_OK;

	error_value = make_status_bytes(status_byte, msg_byte,
		host_byte, driver_byte);

	if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
		if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)
			dev_warn(&h->pdev->dev, "cmd %p "
				"has SCSI Status 0x%x\n",
				cmd, cmd->err_info->ScsiStatus);
		return error_value;
	}

	/* check the sense key */
	sense_key = 0xf & cmd->err_info->SenseInfo[2];
	/* no status or recovered error */
	if (((sense_key == 0x0) || (sense_key == 0x1)) &&
	    (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC))
		error_value = 0;

	if (check_for_unit_attention(h, cmd)) {
		*retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC);
		return 0;
	}

	/* Not SG_IO or similar?
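	   Only REQ_TYPE_BLOCK_PC requests get sense data copied back to
	   the caller below; filesystem requests just log a warning.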
*/ 3097 if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) { 3098 if (error_value != 0) 3099 dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION" 3100 " sense key = 0x%x\n", cmd, sense_key); 3101 return error_value; 3102 } 3103 3104 /* SG_IO or similar, copy sense data back */ 3105 if (cmd->rq->sense) { 3106 if (cmd->rq->sense_len > cmd->err_info->SenseLen) 3107 cmd->rq->sense_len = cmd->err_info->SenseLen; 3108 memcpy(cmd->rq->sense, cmd->err_info->SenseInfo, 3109 cmd->rq->sense_len); 3110 } else 3111 cmd->rq->sense_len = 0; 3112 3113 return error_value; 3114} 3115 3116/* checks the status of the job and calls complete buffers to mark all 3117 * buffers for the completed job. Note that this function does not need 3118 * to hold the hba/queue lock. 3119 */ 3120static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, 3121 int timeout) 3122{ 3123 int retry_cmd = 0; 3124 struct request *rq = cmd->rq; 3125 3126 rq->errors = 0; 3127 3128 if (timeout) 3129 rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT); 3130 3131 if (cmd->err_info->CommandStatus == 0) /* no error has occurred */ 3132 goto after_error_processing; 3133 3134 switch (cmd->err_info->CommandStatus) { 3135 case CMD_TARGET_STATUS: 3136 rq->errors = evaluate_target_status(h, cmd, &retry_cmd); 3137 break; 3138 case CMD_DATA_UNDERRUN: 3139 if (cmd->rq->cmd_type == REQ_TYPE_FS) { 3140 dev_warn(&h->pdev->dev, "cmd %p has" 3141 " completed with data underrun " 3142 "reported\n", cmd); 3143 cmd->rq->resid_len = cmd->err_info->ResidualCnt; 3144 } 3145 break; 3146 case CMD_DATA_OVERRUN: 3147 if (cmd->rq->cmd_type == REQ_TYPE_FS) 3148 dev_warn(&h->pdev->dev, "cciss: cmd %p has" 3149 " completed with data overrun " 3150 "reported\n", cmd); 3151 break; 3152 case CMD_INVALID: 3153 dev_warn(&h->pdev->dev, "cciss: cmd %p is " 3154 "reported invalid\n", cmd); 3155 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3156 cmd->err_info->CommandStatus, DRIVER_OK, 3157 (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3158 DID_PASSTHROUGH : DID_ERROR); 3159 break; 3160 case CMD_PROTOCOL_ERR: 3161 dev_warn(&h->pdev->dev, "cciss: cmd %p has " 3162 "protocol error\n", cmd); 3163 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3164 cmd->err_info->CommandStatus, DRIVER_OK, 3165 (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3166 DID_PASSTHROUGH : DID_ERROR); 3167 break; 3168 case CMD_HARDWARE_ERR: 3169 dev_warn(&h->pdev->dev, "cciss: cmd %p had " 3170 " hardware error\n", cmd); 3171 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3172 cmd->err_info->CommandStatus, DRIVER_OK, 3173 (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3174 DID_PASSTHROUGH : DID_ERROR); 3175 break; 3176 case CMD_CONNECTION_LOST: 3177 dev_warn(&h->pdev->dev, "cciss: cmd %p had " 3178 "connection lost\n", cmd); 3179 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3180 cmd->err_info->CommandStatus, DRIVER_OK, 3181 (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3182 DID_PASSTHROUGH : DID_ERROR); 3183 break; 3184 case CMD_ABORTED: 3185 dev_warn(&h->pdev->dev, "cciss: cmd %p was " 3186 "aborted\n", cmd); 3187 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3188 cmd->err_info->CommandStatus, DRIVER_OK, 3189 (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3190 DID_PASSTHROUGH : DID_ABORT); 3191 break; 3192 case CMD_ABORT_FAILED: 3193 dev_warn(&h->pdev->dev, "cciss: cmd %p reports " 3194 "abort failed\n", cmd); 3195 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3196 cmd->err_info->CommandStatus, DRIVER_OK, 3197 (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 
				DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_UNSOLICITED_ABORT:
		dev_warn(&h->pdev->dev, "cciss%d: unsolicited "
			"abort %p\n", h->ctlr, cmd);
		if (cmd->retry_count < MAX_CMD_RETRIES) {
			retry_cmd = 1;
			dev_warn(&h->pdev->dev, "retrying %p\n", cmd);
			cmd->retry_count++;
		} else
			dev_warn(&h->pdev->dev,
				"%p retried too many times\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
				DID_PASSTHROUGH : DID_ABORT);
		break;
	case CMD_TIMEOUT:
		dev_warn(&h->pdev->dev, "cmd %p timed out\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
				DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_UNABORTABLE:
		dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC ?
				DID_PASSTHROUGH : DID_ERROR);
		break;
	default:
		dev_warn(&h->pdev->dev, "cmd %p returned "
			"unknown status %x\n", cmd,
			cmd->err_info->CommandStatus);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
				DID_PASSTHROUGH : DID_ERROR);
	}

after_error_processing:

	/* We need to return this command */
	if (retry_cmd) {
		resend_cciss_cmd(h, cmd);
		return;
	}
	cmd->rq->completion_data = cmd;
	blk_complete_request(cmd->rq);
}

static inline u32 cciss_tag_contains_index(u32 tag)
{
#define DIRECT_LOOKUP_BIT 0x10
	return tag & DIRECT_LOOKUP_BIT;
}

static inline u32 cciss_tag_to_index(u32 tag)
{
#define DIRECT_LOOKUP_SHIFT 5
	return tag >> DIRECT_LOOKUP_SHIFT;
}

static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag)
{
#define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
#define CCISS_SIMPLE_ERROR_BITS 0x03
	if (likely(h->transMethod & CFGTBL_Trans_Performant))
		return tag & ~CCISS_PERF_ERROR_BITS;
	return tag & ~CCISS_SIMPLE_ERROR_BITS;
}

static inline void cciss_mark_tag_indexed(u32 *tag)
{
	*tag |= DIRECT_LOOKUP_BIT;
}

static inline void cciss_set_tag_index(u32 *tag, u32 index)
{
	*tag |= (index << DIRECT_LOOKUP_SHIFT);
}

/*
 * Get a request and submit it to the controller.
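 * The completion tag built here packs the command-pool index above bit 4
 * (DIRECT_LOOKUP_SHIFT is 5, DIRECT_LOOKUP_BIT is 0x10), leaving the low
 * bits free for controller error reporting; e.g. pool slot 3 yields the
 * tag (3 << 5) | 0x10 = 0x70.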
3283 */ 3284static void do_cciss_request(struct request_queue *q) 3285{ 3286 ctlr_info_t *h = q->queuedata; 3287 CommandList_struct *c; 3288 sector_t start_blk; 3289 int seg; 3290 struct request *creq; 3291 u64bit temp64; 3292 struct scatterlist *tmp_sg; 3293 SGDescriptor_struct *curr_sg; 3294 drive_info_struct *drv; 3295 int i, dir; 3296 int sg_index = 0; 3297 int chained = 0; 3298 3299 queue: 3300 creq = blk_peek_request(q); 3301 if (!creq) 3302 goto startio; 3303 3304 BUG_ON(creq->nr_phys_segments > h->maxsgentries); 3305 3306 c = cmd_alloc(h); 3307 if (!c) 3308 goto full; 3309 3310 blk_start_request(creq); 3311 3312 tmp_sg = h->scatter_list[c->cmdindex]; 3313 spin_unlock_irq(q->queue_lock); 3314 3315 c->cmd_type = CMD_RWREQ; 3316 c->rq = creq; 3317 3318 /* fill in the request */ 3319 drv = creq->rq_disk->private_data; 3320 c->Header.ReplyQueue = 0; /* unused in simple mode */ 3321 /* got command from pool, so use the command block index instead */ 3322 /* for direct lookups. */ 3323 /* The first 2 bits are reserved for controller error reporting. */ 3324 cciss_set_tag_index(&c->Header.Tag.lower, c->cmdindex); 3325 cciss_mark_tag_indexed(&c->Header.Tag.lower); 3326 memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID)); 3327 c->Request.CDBLen = 10; /* 12 byte commands not in FW yet; */ 3328 c->Request.Type.Type = TYPE_CMD; /* It is a command. */ 3329 c->Request.Type.Attribute = ATTR_SIMPLE; 3330 c->Request.Type.Direction = 3331 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE; 3332 c->Request.Timeout = 0; /* Don't time out */ 3333 c->Request.CDB[0] = 3334 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write; 3335 start_blk = blk_rq_pos(creq); 3336 dev_dbg(&h->pdev->dev, "sector =%d nr_sectors=%d\n", 3337 (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq)); 3338 sg_init_table(tmp_sg, h->maxsgentries); 3339 seg = blk_rq_map_sg(q, creq, tmp_sg); 3340 3341 /* get the DMA records for the setup */ 3342 if (c->Request.Type.Direction == XFER_READ) 3343 dir = PCI_DMA_FROMDEVICE; 3344 else 3345 dir = PCI_DMA_TODEVICE; 3346 3347 curr_sg = c->SG; 3348 sg_index = 0; 3349 chained = 0; 3350 3351 for (i = 0; i < seg; i++) { 3352 if (((sg_index+1) == (h->max_cmd_sgentries)) && 3353 !chained && ((seg - i) > 1)) { 3354 /* Point to next chain block. 
		 */
			curr_sg = h->cmd_sg_list[c->cmdindex];
			sg_index = 0;
			chained = 1;
		}
		curr_sg[sg_index].Len = tmp_sg[i].length;
		temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
						tmp_sg[i].offset,
						tmp_sg[i].length, dir);
		curr_sg[sg_index].Addr.lower = temp64.val32.lower;
		curr_sg[sg_index].Addr.upper = temp64.val32.upper;
		curr_sg[sg_index].Ext = 0;	/* we are not chaining */
		++sg_index;
	}
	if (chained)
		cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex],
			(seg - (h->max_cmd_sgentries - 1)) *
				sizeof(SGDescriptor_struct));

	/* track how many SG entries we are using */
	if (seg > h->maxSG)
		h->maxSG = seg;

	dev_dbg(&h->pdev->dev, "Submitting %u sectors in %d segments "
			"chained[%d]\n",
			blk_rq_sectors(creq), seg, chained);

	c->Header.SGTotal = seg + chained;
	if (seg <= h->max_cmd_sgentries)
		c->Header.SGList = c->Header.SGTotal;
	else
		c->Header.SGList = h->max_cmd_sgentries;
	set_performant_mode(h, c);

	if (likely(creq->cmd_type == REQ_TYPE_FS)) {
		if (h->cciss_read == CCISS_READ_10) {
			c->Request.CDB[1] = 0;
			c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */
			c->Request.CDB[3] = (start_blk >> 16) & 0xff;
			c->Request.CDB[4] = (start_blk >> 8) & 0xff;
			c->Request.CDB[5] = start_blk & 0xff;
			c->Request.CDB[6] = 0;	/* (sect >> 24) & 0xff; MSB */
			c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
			c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
			c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
		} else {
			u32 upper32 = upper_32_bits(start_blk);

			c->Request.CDBLen = 16;
			c->Request.CDB[1] = 0;
			c->Request.CDB[2] = (upper32 >> 24) & 0xff; /* MSB */
			c->Request.CDB[3] = (upper32 >> 16) & 0xff;
			c->Request.CDB[4] = (upper32 >> 8) & 0xff;
			c->Request.CDB[5] = upper32 & 0xff;
			c->Request.CDB[6] = (start_blk >> 24) & 0xff;
			c->Request.CDB[7] = (start_blk >> 16) & 0xff;
			c->Request.CDB[8] = (start_blk >> 8) & 0xff;
			c->Request.CDB[9] = start_blk & 0xff;
			c->Request.CDB[10] = (blk_rq_sectors(creq) >> 24) & 0xff;
			c->Request.CDB[11] = (blk_rq_sectors(creq) >> 16) & 0xff;
			c->Request.CDB[12] = (blk_rq_sectors(creq) >> 8) & 0xff;
			c->Request.CDB[13] = blk_rq_sectors(creq) & 0xff;
			c->Request.CDB[14] = c->Request.CDB[15] = 0;
		}
	} else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) {
		c->Request.CDBLen = creq->cmd_len;
		memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
	} else {
		dev_warn(&h->pdev->dev, "bad request type %d\n",
			creq->cmd_type);
		BUG();
	}

	spin_lock_irq(q->queue_lock);

	addQ(&h->reqQ, c);
	h->Qdepth++;
	if (h->Qdepth > h->maxQsinceinit)
		h->maxQsinceinit = h->Qdepth;

	goto queue;
full:
	blk_stop_queue(q);
startio:
	/* We will already have the driver lock here so no need
	 * to lock it.
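	 * (The lock in question is h->lock, which cciss_add_disk passed
	 * to blk_init_queue() as the queue lock, so start_io() below runs
	 * with the same protection it gets everywhere else.)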
3440 */ 3441 start_io(h); 3442} 3443 3444static inline unsigned long get_next_completion(ctlr_info_t *h) 3445{ 3446 return h->access.command_completed(h); 3447} 3448 3449static inline int interrupt_pending(ctlr_info_t *h) 3450{ 3451 return h->access.intr_pending(h); 3452} 3453 3454static inline long interrupt_not_for_us(ctlr_info_t *h) 3455{ 3456 return ((h->access.intr_pending(h) == 0) || 3457 (h->interrupts_enabled == 0)); 3458} 3459 3460static inline int bad_tag(ctlr_info_t *h, u32 tag_index, 3461 u32 raw_tag) 3462{ 3463 if (unlikely(tag_index >= h->nr_cmds)) { 3464 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); 3465 return 1; 3466 } 3467 return 0; 3468} 3469 3470static inline void finish_cmd(ctlr_info_t *h, CommandList_struct *c, 3471 u32 raw_tag) 3472{ 3473 removeQ(c); 3474 if (likely(c->cmd_type == CMD_RWREQ)) 3475 complete_command(h, c, 0); 3476 else if (c->cmd_type == CMD_IOCTL_PEND) 3477 complete(c->waiting); 3478#ifdef CONFIG_CISS_SCSI_TAPE 3479 else if (c->cmd_type == CMD_SCSI) 3480 complete_scsi_command(c, 0, raw_tag); 3481#endif 3482} 3483 3484static inline u32 next_command(ctlr_info_t *h) 3485{ 3486 u32 a; 3487 3488 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) 3489 return h->access.command_completed(h); 3490 3491 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { 3492 a = *(h->reply_pool_head); /* Next cmd in ring buffer */ 3493 (h->reply_pool_head)++; 3494 h->commands_outstanding--; 3495 } else { 3496 a = FIFO_EMPTY; 3497 } 3498 /* Check for wraparound */ 3499 if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { 3500 h->reply_pool_head = h->reply_pool; 3501 h->reply_pool_wraparound ^= 1; 3502 } 3503 return a; 3504} 3505 3506/* process completion of an indexed ("direct lookup") command */ 3507static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag) 3508{ 3509 u32 tag_index; 3510 CommandList_struct *c; 3511 3512 tag_index = cciss_tag_to_index(raw_tag); 3513 if (bad_tag(h, tag_index, raw_tag)) 3514 return next_command(h); 3515 c = h->cmd_pool + tag_index; 3516 finish_cmd(h, c, raw_tag); 3517 return next_command(h); 3518} 3519 3520/* process completion of a non-indexed command */ 3521static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag) 3522{ 3523 CommandList_struct *c = NULL; 3524 __u32 busaddr_masked, tag_masked; 3525 3526 tag_masked = cciss_tag_discard_error_bits(h, raw_tag); 3527 list_for_each_entry(c, &h->cmpQ, list) { 3528 busaddr_masked = cciss_tag_discard_error_bits(h, c->busaddr); 3529 if (busaddr_masked == tag_masked) { 3530 finish_cmd(h, c, raw_tag); 3531 return next_command(h); 3532 } 3533 } 3534 bad_tag(h, h->nr_cmds + 1, raw_tag); 3535 return next_command(h); 3536} 3537 3538/* Some controllers, like p400, will give us one interrupt 3539 * after a soft reset, even if we turned interrupts off. 3540 * Only need to check for this in the cciss_xxx_discard_completions 3541 * functions. 3542 */ 3543static int ignore_bogus_interrupt(ctlr_info_t *h) 3544{ 3545 if (likely(!reset_devices)) 3546 return 0; 3547 3548 if (likely(h->interrupts_enabled)) 3549 return 0; 3550 3551 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " 3552 "(known firmware bug.) 
Ignoring.\n"); 3553 3554 return 1; 3555} 3556 3557static irqreturn_t cciss_intx_discard_completions(int irq, void *dev_id) 3558{ 3559 ctlr_info_t *h = dev_id; 3560 unsigned long flags; 3561 u32 raw_tag; 3562 3563 if (ignore_bogus_interrupt(h)) 3564 return IRQ_NONE; 3565 3566 if (interrupt_not_for_us(h)) 3567 return IRQ_NONE; 3568 spin_lock_irqsave(&h->lock, flags); 3569 while (interrupt_pending(h)) { 3570 raw_tag = get_next_completion(h); 3571 while (raw_tag != FIFO_EMPTY) 3572 raw_tag = next_command(h); 3573 } 3574 spin_unlock_irqrestore(&h->lock, flags); 3575 return IRQ_HANDLED; 3576} 3577 3578static irqreturn_t cciss_msix_discard_completions(int irq, void *dev_id) 3579{ 3580 ctlr_info_t *h = dev_id; 3581 unsigned long flags; 3582 u32 raw_tag; 3583 3584 if (ignore_bogus_interrupt(h)) 3585 return IRQ_NONE; 3586 3587 spin_lock_irqsave(&h->lock, flags); 3588 raw_tag = get_next_completion(h); 3589 while (raw_tag != FIFO_EMPTY) 3590 raw_tag = next_command(h); 3591 spin_unlock_irqrestore(&h->lock, flags); 3592 return IRQ_HANDLED; 3593} 3594 3595static irqreturn_t do_cciss_intx(int irq, void *dev_id) 3596{ 3597 ctlr_info_t *h = dev_id; 3598 unsigned long flags; 3599 u32 raw_tag; 3600 3601 if (interrupt_not_for_us(h)) 3602 return IRQ_NONE; 3603 spin_lock_irqsave(&h->lock, flags); 3604 while (interrupt_pending(h)) { 3605 raw_tag = get_next_completion(h); 3606 while (raw_tag != FIFO_EMPTY) { 3607 if (cciss_tag_contains_index(raw_tag)) 3608 raw_tag = process_indexed_cmd(h, raw_tag); 3609 else 3610 raw_tag = process_nonindexed_cmd(h, raw_tag); 3611 } 3612 } 3613 spin_unlock_irqrestore(&h->lock, flags); 3614 return IRQ_HANDLED; 3615} 3616 3617/* Add a second interrupt handler for MSI/MSI-X mode. In this mode we never 3618 * check the interrupt pending register because it is not set. 3619 */ 3620static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id) 3621{ 3622 ctlr_info_t *h = dev_id; 3623 unsigned long flags; 3624 u32 raw_tag; 3625 3626 spin_lock_irqsave(&h->lock, flags); 3627 raw_tag = get_next_completion(h); 3628 while (raw_tag != FIFO_EMPTY) { 3629 if (cciss_tag_contains_index(raw_tag)) 3630 raw_tag = process_indexed_cmd(h, raw_tag); 3631 else 3632 raw_tag = process_nonindexed_cmd(h, raw_tag); 3633 } 3634 spin_unlock_irqrestore(&h->lock, flags); 3635 return IRQ_HANDLED; 3636} 3637 3638/** 3639 * add_to_scan_list() - add controller to rescan queue 3640 * @h: Pointer to the controller. 3641 * 3642 * Adds the controller to the rescan queue if not already on the queue. 3643 * 3644 * returns 1 if added to the queue, 0 if skipped (could be on the 3645 * queue already, or the controller could be initializing or shutting 3646 * down). 3647 **/ 3648static int add_to_scan_list(struct ctlr_info *h) 3649{ 3650 struct ctlr_info *test_h; 3651 int found = 0; 3652 int ret = 0; 3653 3654 if (h->busy_initializing) 3655 return 0; 3656 3657 if (!mutex_trylock(&h->busy_shutting_down)) 3658 return 0; 3659 3660 mutex_lock(&scan_mutex); 3661 list_for_each_entry(test_h, &scan_q, scan_list) { 3662 if (test_h == h) { 3663 found = 1; 3664 break; 3665 } 3666 } 3667 if (!found && !h->busy_scanning) { 3668 INIT_COMPLETION(h->scan_wait); 3669 list_add_tail(&h->scan_list, &scan_q); 3670 ret = 1; 3671 } 3672 mutex_unlock(&scan_mutex); 3673 mutex_unlock(&h->busy_shutting_down); 3674 3675 return ret; 3676} 3677 3678/** 3679 * remove_from_scan_list() - remove controller from rescan queue 3680 * @h: Pointer to the controller. 3681 * 3682 * Removes the controller from the rescan queue if present. 
Blocks if
3683 * the controller is currently conducting a rescan. The controller
3684 * can be in one of three states:
3685 * 1. Doesn't need a scan
3686 * 2. On the scan list, but not scanning yet (we remove it)
3687 * 3. Busy scanning (and not on the list). In this case we want to wait for
3688 * the scan to complete to make sure the scanning thread for this
3689 * controller is completely idle.
3690 **/
3691static void remove_from_scan_list(struct ctlr_info *h)
3692{
3693 struct ctlr_info *test_h, *tmp_h;
3694
3695 mutex_lock(&scan_mutex);
3696 list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) {
3697 if (test_h == h) { /* state 2. */
3698 list_del(&h->scan_list);
3699 complete_all(&h->scan_wait);
3700 mutex_unlock(&scan_mutex);
3701 return;
3702 }
3703 }
3704 if (h->busy_scanning) { /* state 3. */
3705 mutex_unlock(&scan_mutex);
3706 wait_for_completion(&h->scan_wait);
3707 } else { /* state 1, nothing to do. */
3708 mutex_unlock(&scan_mutex);
3709 }
3710}
3711
3712/**
3713 * scan_thread() - kernel thread used to rescan controllers
3714 * @data: Ignored.
3715 *
3716 * A kernel thread used to scan for drive topology changes on
3717 * controllers. The thread processes only one controller at a time
3718 * using a queue. Controllers are added to the queue with
3719 * add_to_scan_list() and removed from the queue either after being
3720 * processed or via remove_from_scan_list().
3721 *
3722 * returns 0.
3723 **/
3724static int scan_thread(void *data)
3725{
3726 struct ctlr_info *h;
3727
3728 while (1) {
3729 set_current_state(TASK_INTERRUPTIBLE);
3730 schedule();
3731 if (kthread_should_stop())
3732 break;
3733
3734 while (1) {
3735 mutex_lock(&scan_mutex);
3736 if (list_empty(&scan_q)) {
3737 mutex_unlock(&scan_mutex);
3738 break;
3739 }
3740
3741 h = list_entry(scan_q.next,
3742 struct ctlr_info,
3743 scan_list);
3744 list_del(&h->scan_list);
3745 h->busy_scanning = 1;
3746 mutex_unlock(&scan_mutex);
3747
3748 rebuild_lun_table(h, 0, 0);
3749 complete_all(&h->scan_wait);
3750 mutex_lock(&scan_mutex);
3751 h->busy_scanning = 0;
3752 mutex_unlock(&scan_mutex);
3753 }
3754 }
3755
3756 return 0;
3757}
3758
3759static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
3760{
3761 if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
3762 return 0;
3763
3764 switch (c->err_info->SenseInfo[12]) {
3765 case STATE_CHANGED:
3766 dev_warn(&h->pdev->dev, "a state change "
3767 "detected, command retried\n");
3768 return 1;
3770 case LUN_FAILED:
3771 dev_warn(&h->pdev->dev, "LUN failure "
3772 "detected, action required\n");
3773 return 1;
3775 case REPORT_LUNS_CHANGED:
3776 dev_warn(&h->pdev->dev, "report LUN data changed\n");
3777 /*
3778 * Here, we could call add_to_scan_list and wake up the scan thread,
3779 * except that it's quite likely that we will get more than one
3780 * REPORT_LUNS_CHANGED condition in quick succession, which means
3781 * that those which occur after the first one will likely happen
3782 * *during* the scan_thread's rescan. And the rescan code is not
3783 * robust enough to restart in the middle, undoing what it has already
3784 * done, and it's not clear that it's even possible to do this, since
3785 * part of what it does is notify the block layer, which starts
3786 * doing its own i/o to read partition tables and so on, and the
3787 * driver doesn't have visibility to know what might need undoing.
3788 * In any event, even if it were possible, it would be horribly
3789 * complicated to get right, so we just don't do it for now.
3790 *
3791 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
3792 */
3793 return 1;
3795 case POWER_OR_RESET:
3796 dev_warn(&h->pdev->dev,
3797 "a power on or device reset detected\n");
3798 return 1;
3800 case UNIT_ATTENTION_CLEARED:
3801 dev_warn(&h->pdev->dev,
3802 "unit attention cleared by another initiator\n");
3803 return 1;
3805 default:
3806 dev_warn(&h->pdev->dev, "unknown unit attention detected\n");
3807 return 1;
3808 }
3809}
3810
3811/*
3812 * We cannot read the structure directly; for portability we must use
3813 * the io functions.
3814 * This is for debug only.
3815 */
3816static void print_cfg_table(ctlr_info_t *h)
3817{
3818 int i;
3819 char temp_name[17];
3820 CfgTable_struct *tb = h->cfgtable;
3821
3822 dev_dbg(&h->pdev->dev, "Controller Configuration information\n");
3823 dev_dbg(&h->pdev->dev, "------------------------------------\n");
3824 for (i = 0; i < 4; i++)
3825 temp_name[i] = readb(&(tb->Signature[i]));
3826 temp_name[4] = '\0';
3827 dev_dbg(&h->pdev->dev, " Signature = %s\n", temp_name);
3828 dev_dbg(&h->pdev->dev, " Spec Number = %d\n",
3829 readl(&(tb->SpecValence)));
3830 dev_dbg(&h->pdev->dev, " Transport methods supported = 0x%x\n",
3831 readl(&(tb->TransportSupport)));
3832 dev_dbg(&h->pdev->dev, " Transport methods active = 0x%x\n",
3833 readl(&(tb->TransportActive)));
3834 dev_dbg(&h->pdev->dev, " Requested transport Method = 0x%x\n",
3835 readl(&(tb->HostWrite.TransportRequest)));
3836 dev_dbg(&h->pdev->dev, " Coalesce Interrupt Delay = 0x%x\n",
3837 readl(&(tb->HostWrite.CoalIntDelay)));
3838 dev_dbg(&h->pdev->dev, " Coalesce Interrupt Count = 0x%x\n",
3839 readl(&(tb->HostWrite.CoalIntCount)));
3840 dev_dbg(&h->pdev->dev, " Max outstanding commands = %d\n",
3841 readl(&(tb->CmdsOutMax)));
3842 dev_dbg(&h->pdev->dev, " Bus Types = 0x%x\n",
3843 readl(&(tb->BusTypes)));
3844 for (i = 0; i < 16; i++)
3845 temp_name[i] = readb(&(tb->ServerName[i]));
3846 temp_name[16] = '\0';
3847 dev_dbg(&h->pdev->dev, " Server Name = %s\n", temp_name);
3848 dev_dbg(&h->pdev->dev, " Heartbeat Counter = 0x%x\n\n\n",
3849 readl(&(tb->HeartBeat)));
3850}
3851
3852static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3853{
3854 int i, offset, mem_type, bar_type;
3855 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3856 return 0;
3857 offset = 0;
3858 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3859 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3860 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3861 offset += 4;
3862 else {
3863 mem_type = pci_resource_flags(pdev, i) &
3864 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3865 switch (mem_type) {
3866 case PCI_BASE_ADDRESS_MEM_TYPE_32:
3867 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3868 offset += 4; /* 32 bit */
3869 break;
3870 case PCI_BASE_ADDRESS_MEM_TYPE_64:
3871 offset += 8;
3872 break;
3873 default: /* reserved in PCI 2.2 */
3874 dev_warn(&pdev->dev,
3875 "Base address is invalid\n");
3876 return -1;
3878 }
3879 }
3880 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3881 return i + 1;
3882 }
3883 return -1;
3884}
3885
3886/* Fill in bucket_map[], given nsgs (the max number of
3887 * scatter gather elements supported) and bucket[],
3888 * which is an array of 8 integers. The bucket[] array
3889 * contains 8 different DMA transfer sizes (in 16
3890 * byte increments) which the controller uses to fetch
3891 * commands.
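 * (With the bft[] table that cciss_enter_performant_mode() passes in
 * below, those sizes are 5, 6, 8, 10, 12, 20, 28 and MAXSGENTRIES + 4
 * sixteen-byte blocks.)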
This function fills in bucket_map[], which 3892 * maps a given number of scatter gather elements to one of 3893 * the 8 DMA transfer sizes. The point of it is to allow the 3894 * controller to only do as much DMA as needed to fetch the 3895 * command, with the DMA transfer size encoded in the lower 3896 * bits of the command address. 3897 */ 3898static void calc_bucket_map(int bucket[], int num_buckets, 3899 int nsgs, int *bucket_map) 3900{ 3901 int i, j, b, size; 3902 3903 /* even a command with 0 SGs requires 4 blocks */ 3904#define MINIMUM_TRANSFER_BLOCKS 4 3905#define NUM_BUCKETS 8 3906 /* Note, bucket_map must have nsgs+1 entries. */ 3907 for (i = 0; i <= nsgs; i++) { 3908 /* Compute size of a command with i SG entries */ 3909 size = i + MINIMUM_TRANSFER_BLOCKS; 3910 b = num_buckets; /* Assume the biggest bucket */ 3911 /* Find the bucket that is just big enough */ 3912 for (j = 0; j < 8; j++) { 3913 if (bucket[j] >= size) { 3914 b = j; 3915 break; 3916 } 3917 } 3918 /* for a command with i SG entries, use bucket b. */ 3919 bucket_map[i] = b; 3920 } 3921} 3922 3923static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h) 3924{ 3925 int i; 3926 3927 /* under certain very rare conditions, this can take awhile. 3928 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right 3929 * as we enter this code.) */ 3930 for (i = 0; i < MAX_CONFIG_WAIT; i++) { 3931 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) 3932 break; 3933 usleep_range(10000, 20000); 3934 } 3935} 3936 3937static __devinit void cciss_enter_performant_mode(ctlr_info_t *h, 3938 u32 use_short_tags) 3939{ 3940 /* This is a bit complicated. There are 8 registers on 3941 * the controller which we write to to tell it 8 different 3942 * sizes of commands which there may be. It's a way of 3943 * reducing the DMA done to fetch each command. Encoded into 3944 * each command's tag are 3 bits which communicate to the controller 3945 * which of the eight sizes that command fits within. The size of 3946 * each command depends on how many scatter gather entries there are. 3947 * Each SG entry requires 16 bytes. The eight registers are programmed 3948 * with the number of 16-byte blocks a command of that size requires. 3949 * The smallest command possible requires 5 such 16 byte blocks. 3950 * the largest command possible requires MAXSGENTRIES + 4 16-byte 3951 * blocks. Note, this only extends to the SG entries contained 3952 * within the command block, and does not extend to chained blocks 3953 * of SG elements. bft[] contains the eight values we write to 3954 * the registers. They are not evenly distributed, but have more 3955 * sizes for small commands, and fewer sizes for larger commands. 3956 */ 3957 __u32 trans_offset; 3958 int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4}; 3959 /* 3960 * 5 = 1 s/g entry or 4k 3961 * 6 = 2 s/g entry or 8k 3962 * 8 = 4 s/g entry or 16k 3963 * 10 = 6 s/g entry or 24k 3964 */ 3965 unsigned long register_value; 3966 BUILD_BUG_ON(28 > MAXSGENTRIES + 4); 3967 3968 h->reply_pool_wraparound = 1; /* spec: init to 1 */ 3969 3970 /* Controller spec: zero out this buffer. 
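 *
 * Worked example (an editor's illustration, not from the original
 * source): a command carrying 3 SG entries needs 3 + 4 = 7 of the
 * 16-byte blocks, so calc_bucket_map() below files it under bucket 2
 * (bft[2] == 8) and the controller fetches 8 * 16 = 128 bytes, the
 * smallest size that covers the command.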
*/
3971 memset(h->reply_pool, 0, h->max_commands * sizeof(__u64));
3972 h->reply_pool_head = h->reply_pool;
3973
3974 trans_offset = readl(&(h->cfgtable->TransMethodOffset));
3975 calc_bucket_map(bft, ARRAY_SIZE(bft), h->maxsgentries,
3976 h->blockFetchTable);
3977 writel(bft[0], &h->transtable->BlockFetch0);
3978 writel(bft[1], &h->transtable->BlockFetch1);
3979 writel(bft[2], &h->transtable->BlockFetch2);
3980 writel(bft[3], &h->transtable->BlockFetch3);
3981 writel(bft[4], &h->transtable->BlockFetch4);
3982 writel(bft[5], &h->transtable->BlockFetch5);
3983 writel(bft[6], &h->transtable->BlockFetch6);
3984 writel(bft[7], &h->transtable->BlockFetch7);
3985
3986 /* size of controller ring buffer */
3987 writel(h->max_commands, &h->transtable->RepQSize);
3988 writel(1, &h->transtable->RepQCount);
3989 writel(0, &h->transtable->RepQCtrAddrLow32);
3990 writel(0, &h->transtable->RepQCtrAddrHigh32);
3991 writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
3992 writel(0, &h->transtable->RepQAddr0High32);
3993 writel(CFGTBL_Trans_Performant | use_short_tags,
3994 &(h->cfgtable->HostWrite.TransportRequest));
3995
3996 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3997 cciss_wait_for_mode_change_ack(h);
3998 register_value = readl(&(h->cfgtable->TransportActive));
3999 if (!(register_value & CFGTBL_Trans_Performant))
4000 dev_warn(&h->pdev->dev, "unable to get board into"
4001 " performant mode\n");
4002}
4003
4004static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
4005{
4006 __u32 trans_support;
4007
4008 if (cciss_simple_mode)
4009 return;
4010
4011 dev_dbg(&h->pdev->dev, "Trying to put board into Performant mode\n");
4012 /* Attempt to put controller into performant mode if supported */
4013 /* Does board support performant mode? */
4014 trans_support = readl(&(h->cfgtable->TransportSupport));
4015 if (!(trans_support & PERFORMANT_MODE))
4016 return;
4017
4018 dev_dbg(&h->pdev->dev, "Placing controller into performant mode\n");
4019 /* Performant mode demands commands on a 32 byte boundary;
4020 * pci_alloc_consistent aligns on page boundaries already.
4021 * Just need to check if divisible by 32.
4022 */
4023 if ((sizeof(CommandList_struct) % 32) != 0) {
4024 dev_warn(&h->pdev->dev,
4025 "command size %d not divisible by 32, no performant mode\n",
4026 (int)sizeof(CommandList_struct));
4028 return;
4029 }
4030
4031 /* Performant mode ring buffer and supporting data structures */
4032 h->reply_pool = (__u64 *)pci_alloc_consistent(
4033 h->pdev, h->max_commands * sizeof(__u64),
4034 &(h->reply_pool_dhandle));
4035
4036 /* Need a block fetch table for performant mode */
4037 h->blockFetchTable = kmalloc(((h->maxsgentries+1) *
4038 sizeof(__u32)), GFP_KERNEL);
4039
4040 if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL))
4041 goto clean_up;
4042
4043 cciss_enter_performant_mode(h,
4044 trans_support & CFGTBL_Trans_use_short_tags);
4045
4046 /* Change the access methods to the performant access methods */
4047 h->access = SA5_performant_access;
4048 h->transMethod = CFGTBL_Trans_Performant;
4049
4050 return;
4051clean_up:
4052 kfree(h->blockFetchTable);
4053 if (h->reply_pool)
4054 pci_free_consistent(h->pdev,
4055 h->max_commands * sizeof(__u64),
4056 h->reply_pool,
4057 h->reply_pool_dhandle);
4058 return;
4059
4060} /* cciss_put_controller_into_performant_mode */
4061
4062/* If MSI/MSI-X is supported by the kernel we will try to enable it on
4063 * controllers that are capable.
If not, we use IO-APIC mode. 4064 */ 4065 4066static void __devinit cciss_interrupt_mode(ctlr_info_t *h) 4067{ 4068#ifdef CONFIG_PCI_MSI 4069 int err; 4070 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1}, 4071 {0, 2}, {0, 3} 4072 }; 4073 4074 /* Some boards advertise MSI but don't really support it */ 4075 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || 4076 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) 4077 goto default_int_mode; 4078 4079 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { 4080 err = pci_enable_msix(h->pdev, cciss_msix_entries, 4); 4081 if (!err) { 4082 h->intr[0] = cciss_msix_entries[0].vector; 4083 h->intr[1] = cciss_msix_entries[1].vector; 4084 h->intr[2] = cciss_msix_entries[2].vector; 4085 h->intr[3] = cciss_msix_entries[3].vector; 4086 h->msix_vector = 1; 4087 return; 4088 } 4089 if (err > 0) { 4090 dev_warn(&h->pdev->dev, 4091 "only %d MSI-X vectors available\n", err); 4092 goto default_int_mode; 4093 } else { 4094 dev_warn(&h->pdev->dev, 4095 "MSI-X init failed %d\n", err); 4096 goto default_int_mode; 4097 } 4098 } 4099 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { 4100 if (!pci_enable_msi(h->pdev)) 4101 h->msi_vector = 1; 4102 else 4103 dev_warn(&h->pdev->dev, "MSI init failed\n"); 4104 } 4105default_int_mode: 4106#endif /* CONFIG_PCI_MSI */ 4107 /* if we get here we're going to use the default interrupt mode */ 4108 h->intr[h->intr_mode] = h->pdev->irq; 4109 return; 4110} 4111 4112static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id) 4113{ 4114 int i; 4115 u32 subsystem_vendor_id, subsystem_device_id; 4116 4117 subsystem_vendor_id = pdev->subsystem_vendor; 4118 subsystem_device_id = pdev->subsystem_device; 4119 *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 4120 subsystem_vendor_id; 4121 4122 for (i = 0; i < ARRAY_SIZE(products); i++) 4123 if (*board_id == products[i].board_id) 4124 return i; 4125 dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n", 4126 *board_id); 4127 return -ENODEV; 4128} 4129 4130static inline bool cciss_board_disabled(ctlr_info_t *h) 4131{ 4132 u16 command; 4133 4134 (void) pci_read_config_word(h->pdev, PCI_COMMAND, &command); 4135 return ((command & PCI_COMMAND_MEMORY) == 0); 4136} 4137 4138static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev, 4139 unsigned long *memory_bar) 4140{ 4141 int i; 4142 4143 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 4144 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 4145 /* addressing mode bits already removed */ 4146 *memory_bar = pci_resource_start(pdev, i); 4147 dev_dbg(&pdev->dev, "memory BAR = %lx\n", 4148 *memory_bar); 4149 return 0; 4150 } 4151 dev_warn(&pdev->dev, "no memory BAR found\n"); 4152 return -ENODEV; 4153} 4154 4155static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev, 4156 void __iomem *vaddr, int wait_for_ready) 4157#define BOARD_READY 1 4158#define BOARD_NOT_READY 0 4159{ 4160 int i, iterations; 4161 u32 scratchpad; 4162 4163 if (wait_for_ready) 4164 iterations = CCISS_BOARD_READY_ITERATIONS; 4165 else 4166 iterations = CCISS_BOARD_NOT_READY_ITERATIONS; 4167 4168 for (i = 0; i < iterations; i++) { 4169 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); 4170 if (wait_for_ready) { 4171 if (scratchpad == CCISS_FIRMWARE_READY) 4172 return 0; 4173 } else { 4174 if (scratchpad != CCISS_FIRMWARE_READY) 4175 return 0; 4176 } 4177 msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS); 4178 } 4179 dev_warn(&pdev->dev, "board not ready, timed out.\n"); 4180 return -ENODEV; 
4181}
4182
4183static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
4184 void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
4185 u64 *cfg_offset)
4186{
4187 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
4188 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
4189 *cfg_base_addr &= (u32) 0x0000ffff;
4190 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
4191 if (*cfg_base_addr_index == -1) {
4192 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index, "
4193 "*cfg_base_addr = 0x%08x\n", *cfg_base_addr);
4194 return -ENODEV;
4195 }
4196 return 0;
4197}
4198
4199static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
4200{
4201 u64 cfg_offset;
4202 u32 cfg_base_addr;
4203 u64 cfg_base_addr_index;
4204 u32 trans_offset;
4205 int rc;
4206
4207 rc = cciss_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
4208 &cfg_base_addr_index, &cfg_offset);
4209 if (rc)
4210 return rc;
4211 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
4212 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
4213 if (!h->cfgtable)
4214 return -ENOMEM;
4215 rc = write_driver_ver_to_cfgtable(h->cfgtable);
4216 if (rc)
4217 return rc;
4218 /* Find performant mode table. */
4219 trans_offset = readl(&h->cfgtable->TransMethodOffset);
4220 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
4221 cfg_base_addr_index)+cfg_offset+trans_offset,
4222 sizeof(*h->transtable));
4223 if (!h->transtable)
4224 return -ENOMEM;
4225 return 0;
4226}
4227
4228static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
4229{
4230 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
4231
4232 /* Limit commands in memory limited kdump scenario. */
4233 if (reset_devices && h->max_commands > 32)
4234 h->max_commands = 32;
4235
4236 if (h->max_commands < 16) {
4237 dev_warn(&h->pdev->dev, "Controller reports "
4238 "max supported commands of %d, an obvious lie. "
4239 "Using 16. Ensure that firmware is up to date.\n",
4240 h->max_commands);
4241 h->max_commands = 16;
4242 }
4243}
4244
4245/* Interrogate the hardware for some limits:
4246 * max commands, max SG elements without chaining, and with chaining,
4247 * SG chain block size, etc.
4248 */
4249static void __devinit cciss_find_board_params(ctlr_info_t *h)
4250{
4251 cciss_get_max_perf_mode_cmds(h);
4252 h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds;
4253 h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
4254 /*
4255 * Limit in-command s/g elements to 32 to save dma'able memory.
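 * (Editor's illustration: a controller reporting MaxSGElements = 544
 * would get max_cmd_sgentries = 32, chainsize = 544 - 32 + 1 = 513,
 * and maxsgentries trimmed to 543 to reserve one slot for the chain
 * pointer, per the code below.)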
4256 * However, the spec says if 0, use 31.
4257 */
4258 h->max_cmd_sgentries = 31;
4259 if (h->maxsgentries > 512) {
4260 h->max_cmd_sgentries = 32;
4261 h->chainsize = h->maxsgentries - h->max_cmd_sgentries + 1;
4262 h->maxsgentries--; /* save one for chain pointer */
4263 } else {
4264 h->maxsgentries = 31; /* default to traditional values */
4265 h->chainsize = 0;
4266 }
4267}
4268
4269static inline bool CISS_signature_present(ctlr_info_t *h)
4270{
4271 if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
4272 (readb(&h->cfgtable->Signature[1]) != 'I') ||
4273 (readb(&h->cfgtable->Signature[2]) != 'S') ||
4274 (readb(&h->cfgtable->Signature[3]) != 'S')) {
4275 dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
4276 return false;
4277 }
4278 return true;
4279}
4280
4281/* Need to enable prefetch in the SCSI core for 6400 in x86 */
4282static inline void cciss_enable_scsi_prefetch(ctlr_info_t *h)
4283{
4284#ifdef CONFIG_X86
4285 u32 prefetch;
4286
4287 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
4288 prefetch |= 0x100;
4289 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
4290#endif
4291}
4292
4293/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
4294 * in a prefetch beyond physical memory.
4295 */
4296static inline void cciss_p600_dma_prefetch_quirk(ctlr_info_t *h)
4297{
4298 u32 dma_prefetch;
4299 u32 dma_refetch;
4300
4301 if (h->board_id != 0x3225103C)
4302 return;
4303 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
4304 dma_prefetch |= 0x8000;
4305 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
4306 pci_read_config_dword(h->pdev, PCI_COMMAND_PARITY, &dma_refetch);
4307 dma_refetch |= 0x1;
4308 pci_write_config_dword(h->pdev, PCI_COMMAND_PARITY, dma_refetch);
4309}
4310
4311static int __devinit cciss_pci_init(ctlr_info_t *h)
4312{
4313 int prod_index, err;
4314
4315 prod_index = cciss_lookup_board_id(h->pdev, &h->board_id);
4316 if (prod_index < 0)
4317 return -ENODEV;
4318 h->product_name = products[prod_index].product_name;
4319 h->access = *(products[prod_index].access);
4320
4321 if (cciss_board_disabled(h)) {
4322 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
4323 return -ENODEV;
4324 }
4325
4326 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
4327 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
4328
4329 err = pci_enable_device(h->pdev);
4330 if (err) {
4331 dev_warn(&h->pdev->dev, "Unable to enable PCI device\n");
4332 return err;
4333 }
4334
4335 err = pci_request_regions(h->pdev, "cciss");
4336 if (err) {
4337 dev_warn(&h->pdev->dev,
4338 "Cannot obtain PCI resources, aborting\n");
4339 return err;
4340 }
4341
4342 dev_dbg(&h->pdev->dev, "irq = %x\n", h->pdev->irq);
4343 dev_dbg(&h->pdev->dev, "board_id = %x\n", h->board_id);
4344
4345/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
4346 * else we use the IO-APIC interrupt assigned to us by system ROM.
4347 */ 4348 cciss_interrupt_mode(h); 4349 err = cciss_pci_find_memory_BAR(h->pdev, &h->paddr); 4350 if (err) 4351 goto err_out_free_res; 4352 h->vaddr = remap_pci_mem(h->paddr, 0x250); 4353 if (!h->vaddr) { 4354 err = -ENOMEM; 4355 goto err_out_free_res; 4356 } 4357 err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); 4358 if (err) 4359 goto err_out_free_res; 4360 err = cciss_find_cfgtables(h); 4361 if (err) 4362 goto err_out_free_res; 4363 print_cfg_table(h); 4364 cciss_find_board_params(h); 4365 4366 if (!CISS_signature_present(h)) { 4367 err = -ENODEV; 4368 goto err_out_free_res; 4369 } 4370 cciss_enable_scsi_prefetch(h); 4371 cciss_p600_dma_prefetch_quirk(h); 4372 err = cciss_enter_simple_mode(h); 4373 if (err) 4374 goto err_out_free_res; 4375 cciss_put_controller_into_performant_mode(h); 4376 return 0; 4377 4378err_out_free_res: 4379 /* 4380 * Deliberately omit pci_disable_device(): it does something nasty to 4381 * Smart Array controllers that pci_enable_device does not undo 4382 */ 4383 if (h->transtable) 4384 iounmap(h->transtable); 4385 if (h->cfgtable) 4386 iounmap(h->cfgtable); 4387 if (h->vaddr) 4388 iounmap(h->vaddr); 4389 pci_release_regions(h->pdev); 4390 return err; 4391} 4392 4393/* Function to find the first free pointer into our hba[] array 4394 * Returns -1 if no free entries are left. 4395 */ 4396static int alloc_cciss_hba(struct pci_dev *pdev) 4397{ 4398 int i; 4399 4400 for (i = 0; i < MAX_CTLR; i++) { 4401 if (!hba[i]) { 4402 ctlr_info_t *h; 4403 4404 h = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL); 4405 if (!h) 4406 goto Enomem; 4407 hba[i] = h; 4408 return i; 4409 } 4410 } 4411 dev_warn(&pdev->dev, "This driver supports a maximum" 4412 " of %d controllers.\n", MAX_CTLR); 4413 return -1; 4414Enomem: 4415 dev_warn(&pdev->dev, "out of memory.\n"); 4416 return -1; 4417} 4418 4419static void free_hba(ctlr_info_t *h) 4420{ 4421 int i; 4422 4423 hba[h->ctlr] = NULL; 4424 for (i = 0; i < h->highest_lun + 1; i++) 4425 if (h->gendisk[i] != NULL) 4426 put_disk(h->gendisk[i]); 4427 kfree(h); 4428} 4429 4430/* Send a message CDB to the firmware. */ 4431static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, unsigned char type) 4432{ 4433 typedef struct { 4434 CommandListHeader_struct CommandHeader; 4435 RequestBlock_struct Request; 4436 ErrDescriptor_struct ErrorDescriptor; 4437 } Command; 4438 static const size_t cmd_sz = sizeof(Command) + sizeof(ErrorInfo_struct); 4439 Command *cmd; 4440 dma_addr_t paddr64; 4441 uint32_t paddr32, tag; 4442 void __iomem *vaddr; 4443 int i, err; 4444 4445 vaddr = ioremap_nocache(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); 4446 if (vaddr == NULL) 4447 return -ENOMEM; 4448 4449 /* The Inbound Post Queue only accepts 32-bit physical addresses for the 4450 CCISS commands, so they must be allocated from the lower 4GiB of 4451 memory. */ 4452 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 4453 if (err) { 4454 iounmap(vaddr); 4455 return -ENOMEM; 4456 } 4457 4458 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); 4459 if (cmd == NULL) { 4460 iounmap(vaddr); 4461 return -ENOMEM; 4462 } 4463 4464 /* This must fit, because of the 32-bit consistent DMA mask. Also, 4465 although there's no guarantee, we assume that the address is at 4466 least 4-byte aligned (most likely, it's page-aligned). 
*/
4467 paddr32 = paddr64;
4468
4469 cmd->CommandHeader.ReplyQueue = 0;
4470 cmd->CommandHeader.SGList = 0;
4471 cmd->CommandHeader.SGTotal = 0;
4472 cmd->CommandHeader.Tag.lower = paddr32;
4473 cmd->CommandHeader.Tag.upper = 0;
4474 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
4475
4476 cmd->Request.CDBLen = 16;
4477 cmd->Request.Type.Type = TYPE_MSG;
4478 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
4479 cmd->Request.Type.Direction = XFER_NONE;
4480 cmd->Request.Timeout = 0; /* Don't time out */
4481 cmd->Request.CDB[0] = opcode;
4482 cmd->Request.CDB[1] = type;
4483 memset(&cmd->Request.CDB[2], 0, 14); /* the rest of the CDB is reserved */
4484
4485 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(Command);
4486 cmd->ErrorDescriptor.Addr.upper = 0;
4487 cmd->ErrorDescriptor.Len = sizeof(ErrorInfo_struct);
4488
4489 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
4490
4491 for (i = 0; i < 10; i++) {
4492 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
4493 if ((tag & ~3) == paddr32)
4494 break;
4495 msleep(CCISS_POST_RESET_NOOP_TIMEOUT_MSECS);
4496 }
4497
4498 iounmap(vaddr);
4499
4500 /* we leak the DMA buffer here ... no choice since the controller could
4501 still complete the command. */
4502 if (i == 10) {
4503 dev_err(&pdev->dev,
4504 "controller message %02x:%02x timed out\n",
4505 opcode, type);
4506 return -ETIMEDOUT;
4507 }
4508
4509 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
4510
4511 if (tag & 2) {
4512 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
4513 opcode, type);
4514 return -EIO;
4515 }
4516
4517 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
4518 opcode, type);
4519 return 0;
4520}
4521
4522#define cciss_noop(p) cciss_message(p, 3, 0)
4523
4524static int cciss_controller_hard_reset(struct pci_dev *pdev,
4525 void __iomem *vaddr, u32 use_doorbell)
4526{
4527 u16 pmcsr;
4528 int pos;
4529
4530 if (use_doorbell) {
4531 /* For everything after the P600, the PCI power state method
4532 * of resetting the controller doesn't work, so we have this
4533 * other way using the doorbell register.
4534 */
4535 dev_info(&pdev->dev, "using doorbell to reset controller\n");
4536 writel(use_doorbell, vaddr + SA5_DOORBELL);
4537 } else { /* Try to do it the PCI power state way */
4538
4539 /* Quoting from the Open CISS Specification: "The Power
4540 * Management Control/Status Register (CSR) controls the power
4541 * state of the device. The normal operating state is D0,
4542 * CSR=00h. The software off state is D3, CSR=03h. To reset
4543 * the controller, place the interface device in D3 then to D0,
4544 * this causes a secondary PCI reset which will reset the
4545 * controller." */
4546
4547 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
4548 if (pos == 0) {
4549 dev_err(&pdev->dev,
4550 "cciss_controller_hard_reset: "
4551 "PCI PM not supported\n");
4552 return -ENODEV;
4553 }
4554 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
4555 /* enter the D3hot power management state */
4556 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
4557 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
4558 pmcsr |= PCI_D3hot;
4559 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
4560
4561 msleep(500);
4562
4563 /* enter the D0 power management state */
4564 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
4565 pmcsr |= PCI_D0;
4566 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
4567
4568 /*
4569 * The P600 requires a small delay when changing states.
4570 * Otherwise we may think the board did not reset and we bail.
4571 * This is for kdump only and is particular to the P600.
4572 */
4573 msleep(500);
4574 }
4575 return 0;
4576}
4577
4578static __devinit void init_driver_version(char *driver_version, int len)
4579{
4580 memset(driver_version, 0, len);
4581 strncpy(driver_version, "cciss " DRIVER_NAME, len - 1);
4582}
4583
4584static __devinit int write_driver_ver_to_cfgtable(
4585 CfgTable_struct __iomem *cfgtable)
4586{
4587 char *driver_version;
4588 int i, size = sizeof(cfgtable->driver_version);
4589
4590 driver_version = kmalloc(size, GFP_KERNEL);
4591 if (!driver_version)
4592 return -ENOMEM;
4593
4594 init_driver_version(driver_version, size);
4595 for (i = 0; i < size; i++)
4596 writeb(driver_version[i], &cfgtable->driver_version[i]);
4597 kfree(driver_version);
4598 return 0;
4599}
4600
4601static __devinit void read_driver_ver_from_cfgtable(
4602 CfgTable_struct __iomem *cfgtable, unsigned char *driver_ver)
4603{
4604 int i;
4605
4606 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
4607 driver_ver[i] = readb(&cfgtable->driver_version[i]);
4608}
4609
4610static __devinit int controller_reset_failed(
4611 CfgTable_struct __iomem *cfgtable)
4612{
4613
4614 char *driver_ver, *old_driver_ver;
4615 int rc, size = sizeof(cfgtable->driver_version);
4616
4617 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
4618 if (!old_driver_ver)
4619 return -ENOMEM;
4620 driver_ver = old_driver_ver + size;
4621
4622 /* After a reset, the 32 bytes of "driver version" in the cfgtable
4623 * should have been changed, otherwise we know the reset failed.
4624 */
4625 init_driver_version(old_driver_ver, size);
4626 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
4627 rc = !memcmp(driver_ver, old_driver_ver, size);
4628 kfree(old_driver_ver);
4629 return rc;
4630}
4631
4632/* This does a hard reset of the controller using PCI power management
4633 * states or using the doorbell register. */
4634static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4635{
4636 u64 cfg_offset;
4637 u32 cfg_base_addr;
4638 u64 cfg_base_addr_index;
4639 void __iomem *vaddr;
4640 unsigned long paddr;
4641 u32 misc_fw_support;
4642 int rc;
4643 CfgTable_struct __iomem *cfgtable;
4644 u32 use_doorbell;
4645 u32 board_id;
4646 u16 command_register;
4647
4648 /* For controllers as old as the P600, this is very nearly
4649 * the same thing as
4650 *
4651 * pci_save_state(pci_dev);
4652 * pci_set_power_state(pci_dev, PCI_D3hot);
4653 * pci_set_power_state(pci_dev, PCI_D0);
4654 * pci_restore_state(pci_dev);
4655 *
4656 * For controllers newer than the P600, the pci power state
4657 * method of resetting doesn't work so we have another way
4658 * using the doorbell register.
4659 */
4660
4661 /* Exclude 640x boards. These are two pci devices in one slot
4662 * which share a battery backed cache module. One controls the
4663 * cache, the other accesses the cache through the one that controls
4664 * it. If we reset the one controlling the cache, the other will
4665 * likely not be happy. Just forbid resetting this conjoined mess.
4666 */
4667 cciss_lookup_board_id(pdev, &board_id);
4668 if (!ctlr_is_resettable(board_id)) {
4669 dev_warn(&pdev->dev, "Cannot reset Smart Array 640x "
4670 "due to shared cache module.\n");
4671 return -ENODEV;
4672 }
4673
4674 /* if controller is soft- but not hard resettable... */
4675 if (!ctlr_is_hard_resettable(board_id))
4676 return -ENOTSUPP; /* try soft reset later. */
4677
4678 /* Save the PCI command register */
4679 pci_read_config_word(pdev, PCI_COMMAND, &command_register);
4680 /* Turn the board off. This is so that later pci_restore_state()
4681 * won't turn the board on before the rest of config space is ready.
4682 */
4683 pci_disable_device(pdev);
4684 pci_save_state(pdev);
4685
4686 /* find the first memory BAR, so we can find the cfg table */
4687 rc = cciss_pci_find_memory_BAR(pdev, &paddr);
4688 if (rc)
4689 return rc;
4690 vaddr = remap_pci_mem(paddr, 0x250);
4691 if (!vaddr)
4692 return -ENOMEM;
4693
4694 /* find cfgtable in order to check if reset via doorbell is supported */
4695 rc = cciss_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
4696 &cfg_base_addr_index, &cfg_offset);
4697 if (rc)
4698 goto unmap_vaddr;
4699 cfgtable = remap_pci_mem(pci_resource_start(pdev,
4700 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
4701 if (!cfgtable) {
4702 rc = -ENOMEM;
4703 goto unmap_vaddr;
4704 }
4705 rc = write_driver_ver_to_cfgtable(cfgtable);
4706 if (rc)
4707 goto unmap_cfgtable;
4708
4709 /* If reset via doorbell register is supported, use that.
4710 * There are two such methods. Favor the newest method.
4711 */
4712 misc_fw_support = readl(&cfgtable->misc_fw_support);
4713 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
4714 if (use_doorbell) {
4715 use_doorbell = DOORBELL_CTLR_RESET2;
4716 } else {
4717 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
4718 if (use_doorbell) {
4719 dev_warn(&pdev->dev, "Controller claims that "
4720 "'Bit 2 doorbell reset' is "
4721 "supported, but not 'bit 5 doorbell reset'. "
4722 "Firmware update is recommended.\n");
4723 rc = -ENOTSUPP; /* use the soft reset */
4724 goto unmap_cfgtable;
4725 }
4726 }
4727
4728 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
4729 if (rc)
4730 goto unmap_cfgtable;
4731 pci_restore_state(pdev);
4732 rc = pci_enable_device(pdev);
4733 if (rc) {
4734 dev_warn(&pdev->dev, "failed to enable device.\n");
4735 goto unmap_cfgtable;
4736 }
4737 pci_write_config_word(pdev, PCI_COMMAND, command_register);
4738
4739 /* Some devices (notably the HP Smart Array 5i Controller)
4740 need a little pause here */
4741 msleep(CCISS_POST_RESET_PAUSE_MSECS);
4742
4743 /* Wait for board to become not ready, then ready. */
4744 dev_info(&pdev->dev, "Waiting for board to reset.\n");
4745 rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
4746 if (rc) {
4747 dev_warn(&pdev->dev, "Failed waiting for board to hard reset."
4748 " Will try soft reset.\n");
4749 rc = -ENOTSUPP; /* Not expected, but try soft reset later */
4750 goto unmap_cfgtable;
4751 }
4752 rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
4753 if (rc) {
4754 dev_warn(&pdev->dev,
4755 "failed waiting for board to become ready "
4756 "after hard reset\n");
4757 goto unmap_cfgtable;
4758 }
4759
4760 rc = controller_reset_failed(cfgtable);
4761 if (rc < 0)
4762 goto unmap_cfgtable;
4763 if (rc) {
4764 dev_warn(&pdev->dev, "Unable to successfully hard reset "
4765 "controller.
Will try soft reset.\n"); 4766 rc = -ENOTSUPP; /* Not expected, but try soft reset later */ 4767 } else { 4768 dev_info(&pdev->dev, "Board ready after hard reset.\n"); 4769 } 4770 4771unmap_cfgtable: 4772 iounmap(cfgtable); 4773 4774unmap_vaddr: 4775 iounmap(vaddr); 4776 return rc; 4777} 4778 4779static __devinit int cciss_init_reset_devices(struct pci_dev *pdev) 4780{ 4781 int rc, i; 4782 4783 if (!reset_devices) 4784 return 0; 4785 4786 /* Reset the controller with a PCI power-cycle or via doorbell */ 4787 rc = cciss_kdump_hard_reset_controller(pdev); 4788 4789 /* -ENOTSUPP here means we cannot reset the controller 4790 * but it's already (and still) up and running in 4791 * "performant mode". Or, it might be 640x, which can't reset 4792 * due to concerns about shared bbwc between 6402/6404 pair. 4793 */ 4794 if (rc == -ENOTSUPP) 4795 return rc; /* just try to do the kdump anyhow. */ 4796 if (rc) 4797 return -ENODEV; 4798 4799 /* Now try to get the controller to respond to a no-op */ 4800 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n"); 4801 for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) { 4802 if (cciss_noop(pdev) == 0) 4803 break; 4804 else 4805 dev_warn(&pdev->dev, "no-op failed%s\n", 4806 (i < CCISS_POST_RESET_NOOP_RETRIES - 1 ? 4807 "; re-trying" : "")); 4808 msleep(CCISS_POST_RESET_NOOP_INTERVAL_MSECS); 4809 } 4810 return 0; 4811} 4812 4813static __devinit int cciss_allocate_cmd_pool(ctlr_info_t *h) 4814{ 4815 h->cmd_pool_bits = kmalloc( 4816 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * 4817 sizeof(unsigned long), GFP_KERNEL); 4818 h->cmd_pool = pci_alloc_consistent(h->pdev, 4819 h->nr_cmds * sizeof(CommandList_struct), 4820 &(h->cmd_pool_dhandle)); 4821 h->errinfo_pool = pci_alloc_consistent(h->pdev, 4822 h->nr_cmds * sizeof(ErrorInfo_struct), 4823 &(h->errinfo_pool_dhandle)); 4824 if ((h->cmd_pool_bits == NULL) 4825 || (h->cmd_pool == NULL) 4826 || (h->errinfo_pool == NULL)) { 4827 dev_err(&h->pdev->dev, "out of memory"); 4828 return -ENOMEM; 4829 } 4830 return 0; 4831} 4832 4833static __devinit int cciss_allocate_scatterlists(ctlr_info_t *h) 4834{ 4835 int i; 4836 4837 /* zero it, so that on free we need not know how many were alloc'ed */ 4838 h->scatter_list = kzalloc(h->max_commands * 4839 sizeof(struct scatterlist *), GFP_KERNEL); 4840 if (!h->scatter_list) 4841 return -ENOMEM; 4842 4843 for (i = 0; i < h->nr_cmds; i++) { 4844 h->scatter_list[i] = kmalloc(sizeof(struct scatterlist) * 4845 h->maxsgentries, GFP_KERNEL); 4846 if (h->scatter_list[i] == NULL) { 4847 dev_err(&h->pdev->dev, "could not allocate " 4848 "s/g lists\n"); 4849 return -ENOMEM; 4850 } 4851 } 4852 return 0; 4853} 4854 4855static void cciss_free_scatterlists(ctlr_info_t *h) 4856{ 4857 int i; 4858 4859 if (h->scatter_list) { 4860 for (i = 0; i < h->nr_cmds; i++) 4861 kfree(h->scatter_list[i]); 4862 kfree(h->scatter_list); 4863 } 4864} 4865 4866static void cciss_free_cmd_pool(ctlr_info_t *h) 4867{ 4868 kfree(h->cmd_pool_bits); 4869 if (h->cmd_pool) 4870 pci_free_consistent(h->pdev, 4871 h->nr_cmds * sizeof(CommandList_struct), 4872 h->cmd_pool, h->cmd_pool_dhandle); 4873 if (h->errinfo_pool) 4874 pci_free_consistent(h->pdev, 4875 h->nr_cmds * sizeof(ErrorInfo_struct), 4876 h->errinfo_pool, h->errinfo_pool_dhandle); 4877} 4878 4879static int cciss_request_irq(ctlr_info_t *h, 4880 irqreturn_t (*msixhandler)(int, void *), 4881 irqreturn_t (*intxhandler)(int, void *)) 4882{ 4883 if (h->msix_vector || h->msi_vector) { 4884 if (!request_irq(h->intr[h->intr_mode], msixhandler, 4885 0, 
h->devname, h))
4886 return 0;
4887 dev_err(&h->pdev->dev, "Unable to get msi irq %d"
4888 " for %s\n", h->intr[h->intr_mode],
4889 h->devname);
4890 return -1;
4891 }
4892
4893 if (!request_irq(h->intr[h->intr_mode], intxhandler,
4894 IRQF_SHARED, h->devname, h))
4895 return 0;
4896 dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
4897 h->intr[h->intr_mode], h->devname);
4898 return -1;
4899}
4900
4901static int __devinit cciss_kdump_soft_reset(ctlr_info_t *h)
4902{
4903 if (cciss_send_reset(h, CTLR_LUNID, CCISS_RESET_TYPE_CONTROLLER)) {
4904 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
4905 return -EIO;
4906 }
4907
4908 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
4909 if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
4910 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
4911 return -1;
4912 }
4913
4914 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
4915 if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
4916 dev_warn(&h->pdev->dev, "Board failed to become ready "
4917 "after soft reset.\n");
4918 return -1;
4919 }
4920
4921 return 0;
4922}
4923
4924static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h)
4925{
4926 int ctlr = h->ctlr;
4927
4928 free_irq(h->intr[h->intr_mode], h);
4929#ifdef CONFIG_PCI_MSI
4930 if (h->msix_vector)
4931 pci_disable_msix(h->pdev);
4932 else if (h->msi_vector)
4933 pci_disable_msi(h->pdev);
4934#endif /* CONFIG_PCI_MSI */
4935 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
4936 cciss_free_scatterlists(h);
4937 cciss_free_cmd_pool(h);
4938 kfree(h->blockFetchTable);
4939 if (h->reply_pool)
4940 pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64),
4941 h->reply_pool, h->reply_pool_dhandle);
4942 if (h->transtable)
4943 iounmap(h->transtable);
4944 if (h->cfgtable)
4945 iounmap(h->cfgtable);
4946 if (h->vaddr)
4947 iounmap(h->vaddr);
4948 unregister_blkdev(h->major, h->devname);
4949 cciss_destroy_hba_sysfs_entry(h);
4950 pci_release_regions(h->pdev);
4951 kfree(h);
4952 hba[ctlr] = NULL;
4953}
4954
4955/*
4956 * This is it. Initialize and register a controller whenever the PCI core
4957 * hands us one of our boards. I really hate stealing all these major
4958 * device numbers. Returns 1 on success, a negative value on failure.
4959 */
4960static int __devinit cciss_init_one(struct pci_dev *pdev,
4961 const struct pci_device_id *ent)
4962{
4963 int i;
4964 int j = 0;
4965 int rc;
4966 int try_soft_reset = 0;
4967 int dac, return_code;
4968 InquiryData_struct *inq_buff;
4969 ctlr_info_t *h;
4970 unsigned long flags;
4971
4972 rc = cciss_init_reset_devices(pdev);
4973 if (rc) {
4974 if (rc != -ENOTSUPP)
4975 return rc;
4976 /* If the reset fails in a particular way (it has no way to do
4977 * a proper hard reset, so returns -ENOTSUPP) we can try to do
4978 * a soft reset once we get the controller configured up to the
4979 * point that it can accept a command.
4980 */
4981 try_soft_reset = 1;
4982 rc = 0;
4983 }
4984
4985reinit_after_soft_reset:
4986
4987 i = alloc_cciss_hba(pdev);
4988 if (i < 0)
4989 return -1;
4990
4991 h = hba[i];
4992 h->pdev = pdev;
4993 h->busy_initializing = 1;
4994 h->intr_mode = cciss_simple_mode ?
SIMPLE_MODE_INT : PERF_MODE_INT; 4995 INIT_LIST_HEAD(&h->cmpQ); 4996 INIT_LIST_HEAD(&h->reqQ); 4997 mutex_init(&h->busy_shutting_down); 4998 4999 if (cciss_pci_init(h) != 0) 5000 goto clean_no_release_regions; 5001 5002 sprintf(h->devname, "cciss%d", i); 5003 h->ctlr = i; 5004 5005 if (cciss_tape_cmds < 2) 5006 cciss_tape_cmds = 2; 5007 if (cciss_tape_cmds > 16) 5008 cciss_tape_cmds = 16; 5009 5010 init_completion(&h->scan_wait); 5011 5012 if (cciss_create_hba_sysfs_entry(h)) 5013 goto clean0; 5014 5015 /* configure PCI DMA stuff */ 5016 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) 5017 dac = 1; 5018 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) 5019 dac = 0; 5020 else { 5021 dev_err(&h->pdev->dev, "no suitable DMA available\n"); 5022 goto clean1; 5023 } 5024 5025 /* 5026 * register with the major number, or get a dynamic major number 5027 * by passing 0 as argument. This is done for greater than 5028 * 8 controller support. 5029 */ 5030 if (i < MAX_CTLR_ORIG) 5031 h->major = COMPAQ_CISS_MAJOR + i; 5032 rc = register_blkdev(h->major, h->devname); 5033 if (rc == -EBUSY || rc == -EINVAL) { 5034 dev_err(&h->pdev->dev, 5035 "Unable to get major number %d for %s " 5036 "on hba %d\n", h->major, h->devname, i); 5037 goto clean1; 5038 } else { 5039 if (i >= MAX_CTLR_ORIG) 5040 h->major = rc; 5041 } 5042 5043 /* make sure the board interrupts are off */ 5044 h->access.set_intr_mask(h, CCISS_INTR_OFF); 5045 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx); 5046 if (rc) 5047 goto clean2; 5048 5049 dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", 5050 h->devname, pdev->device, pci_name(pdev), 5051 h->intr[h->intr_mode], dac ? "" : " not"); 5052 5053 if (cciss_allocate_cmd_pool(h)) 5054 goto clean4; 5055 5056 if (cciss_allocate_scatterlists(h)) 5057 goto clean4; 5058 5059 h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, 5060 h->chainsize, h->nr_cmds); 5061 if (!h->cmd_sg_list && h->chainsize > 0) 5062 goto clean4; 5063 5064 spin_lock_init(&h->lock); 5065 5066 /* Initialize the pdev driver private data. 5067 have it point to h. */ 5068 pci_set_drvdata(pdev, h); 5069 /* command and error info recs zeroed out before 5070 they are used */ 5071 memset(h->cmd_pool_bits, 0, 5072 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) 5073 * sizeof(unsigned long)); 5074 5075 h->num_luns = 0; 5076 h->highest_lun = -1; 5077 for (j = 0; j < CISS_MAX_LUN; j++) { 5078 h->drv[j] = NULL; 5079 h->gendisk[j] = NULL; 5080 } 5081 5082 /* At this point, the controller is ready to take commands. 5083 * Now, if reset_devices and the hard reset didn't work, try 5084 * the soft reset and see if that works. 5085 */ 5086 if (try_soft_reset) { 5087 5088 /* This is kind of gross. We may or may not get a completion 5089 * from the soft reset command, and if we do, then the value 5090 * from the fifo may or may not be valid. So, we wait 10 secs 5091 * after the reset throwing away any completions we get during 5092 * that time. Unregister the interrupt handler and register 5093 * fake ones to scoop up any residual completions. 
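 * (Editor's note: the "fake" handlers are cciss_msix_discard_completions()
 * and cciss_intx_discard_completions() above, which read and drop every
 * tag; the 10 second drain corresponds to the msleep(10000) a few lines
 * below.)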
5094 */ 5095 spin_lock_irqsave(&h->lock, flags); 5096 h->access.set_intr_mask(h, CCISS_INTR_OFF); 5097 spin_unlock_irqrestore(&h->lock, flags); 5098 free_irq(h->intr[h->intr_mode], h); 5099 rc = cciss_request_irq(h, cciss_msix_discard_completions, 5100 cciss_intx_discard_completions); 5101 if (rc) { 5102 dev_warn(&h->pdev->dev, "Failed to request_irq after " 5103 "soft reset.\n"); 5104 goto clean4; 5105 } 5106 5107 rc = cciss_kdump_soft_reset(h); 5108 if (rc) { 5109 dev_warn(&h->pdev->dev, "Soft reset failed.\n"); 5110 goto clean4; 5111 } 5112 5113 dev_info(&h->pdev->dev, "Board READY.\n"); 5114 dev_info(&h->pdev->dev, 5115 "Waiting for stale completions to drain.\n"); 5116 h->access.set_intr_mask(h, CCISS_INTR_ON); 5117 msleep(10000); 5118 h->access.set_intr_mask(h, CCISS_INTR_OFF); 5119 5120 rc = controller_reset_failed(h->cfgtable); 5121 if (rc) 5122 dev_info(&h->pdev->dev, 5123 "Soft reset appears to have failed.\n"); 5124 5125 /* since the controller's reset, we have to go back and re-init 5126 * everything. Easiest to just forget what we've done and do it 5127 * all over again. 5128 */ 5129 cciss_undo_allocations_after_kdump_soft_reset(h); 5130 try_soft_reset = 0; 5131 if (rc) 5132 /* don't go to clean4, we already unallocated */ 5133 return -ENODEV; 5134 5135 goto reinit_after_soft_reset; 5136 } 5137 5138 cciss_scsi_setup(h); 5139 5140 /* Turn the interrupts on so we can service requests */ 5141 h->access.set_intr_mask(h, CCISS_INTR_ON); 5142 5143 /* Get the firmware version */ 5144 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); 5145 if (inq_buff == NULL) { 5146 dev_err(&h->pdev->dev, "out of memory\n"); 5147 goto clean4; 5148 } 5149 5150 return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff, 5151 sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD); 5152 if (return_code == IO_OK) { 5153 h->firm_ver[0] = inq_buff->data_byte[32]; 5154 h->firm_ver[1] = inq_buff->data_byte[33]; 5155 h->firm_ver[2] = inq_buff->data_byte[34]; 5156 h->firm_ver[3] = inq_buff->data_byte[35]; 5157 } else { /* send command failed */ 5158 dev_warn(&h->pdev->dev, "unable to determine firmware" 5159 " version of controller\n"); 5160 } 5161 kfree(inq_buff); 5162 5163 cciss_procinit(h); 5164 5165 h->cciss_max_sectors = 8192; 5166 5167 rebuild_lun_table(h, 1, 0); 5168 cciss_engage_scsi(h); 5169 h->busy_initializing = 0; 5170 return 1; 5171 5172clean4: 5173 cciss_free_cmd_pool(h); 5174 cciss_free_scatterlists(h); 5175 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); 5176 free_irq(h->intr[h->intr_mode], h); 5177clean2: 5178 unregister_blkdev(h->major, h->devname); 5179clean1: 5180 cciss_destroy_hba_sysfs_entry(h); 5181clean0: 5182 pci_release_regions(pdev); 5183clean_no_release_regions: 5184 h->busy_initializing = 0; 5185 5186 /* 5187 * Deliberately omit pci_disable_device(): it does something nasty to 5188 * Smart Array controllers that pci_enable_device does not undo 5189 */ 5190 pci_set_drvdata(pdev, NULL); 5191 free_hba(h); 5192 return -1; 5193} 5194 5195static void cciss_shutdown(struct pci_dev *pdev) 5196{ 5197 ctlr_info_t *h; 5198 char *flush_buf; 5199 int return_code; 5200 5201 h = pci_get_drvdata(pdev); 5202 flush_buf = kzalloc(4, GFP_KERNEL); 5203 if (!flush_buf) { 5204 dev_warn(&h->pdev->dev, "cache not flushed, out of memory.\n"); 5205 return; 5206 } 5207 /* write all data in the battery backed cache to disk */ 5208 memset(flush_buf, 0, 4); 5209 return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf, 5210 4, 0, CTLR_LUNID, TYPE_CMD); 5211 kfree(flush_buf); 5212 if (return_code 
!= IO_OK)
5213 dev_warn(&h->pdev->dev, "Error flushing cache\n");
5214 h->access.set_intr_mask(h, CCISS_INTR_OFF);
5215 free_irq(h->intr[h->intr_mode], h);
5216}
5217
5218static int __devinit cciss_enter_simple_mode(struct ctlr_info *h)
5219{
5220 u32 trans_support;
5221
5222 trans_support = readl(&(h->cfgtable->TransportSupport));
5223 if (!(trans_support & SIMPLE_MODE))
5224 return -ENOTSUPP;
5225
5226 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
5227 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
5228 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
5229 cciss_wait_for_mode_change_ack(h);
5230 print_cfg_table(h);
5231 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
5232 dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
5233 return -ENODEV;
5234 }
5235 h->transMethod = CFGTBL_Trans_Simple;
5236 return 0;
5237}
5238
5239
5240static void __devexit cciss_remove_one(struct pci_dev *pdev)
5241{
5242 ctlr_info_t *h;
5243 int i, j;
5244
5245 if (pci_get_drvdata(pdev) == NULL) {
5246 dev_err(&pdev->dev, "Unable to remove device\n");
5247 return;
5248 }
5249
5250 h = pci_get_drvdata(pdev);
5251 i = h->ctlr;
5252 if (hba[i] == NULL) {
5253 dev_err(&pdev->dev, "device appears to already be removed\n");
5254 return;
5255 }
5256
5257 mutex_lock(&h->busy_shutting_down);
5258
5259 remove_from_scan_list(h);
5260 remove_proc_entry(h->devname, proc_cciss);
5261 unregister_blkdev(h->major, h->devname);
5262
5263 /* remove it from the disk list */
5264 for (j = 0; j < CISS_MAX_LUN; j++) {
5265 struct gendisk *disk = h->gendisk[j];
5266 if (disk) {
5267 struct request_queue *q = disk->queue;
5268
5269 if (disk->flags & GENHD_FL_UP) {
5270 cciss_destroy_ld_sysfs_entry(h, j, 1);
5271 del_gendisk(disk);
5272 }
5273 if (q)
5274 blk_cleanup_queue(q);
5275 }
5276 }
5277
5278#ifdef CONFIG_CISS_SCSI_TAPE
5279 cciss_unregister_scsi(h); /* unhook from SCSI subsystem */
5280#endif
5281
5282 cciss_shutdown(pdev);
5283
5284#ifdef CONFIG_PCI_MSI
5285 if (h->msix_vector)
5286 pci_disable_msix(h->pdev);
5287 else if (h->msi_vector)
5288 pci_disable_msi(h->pdev);
5289#endif /* CONFIG_PCI_MSI */
5290
5291 iounmap(h->transtable);
5292 iounmap(h->cfgtable);
5293 iounmap(h->vaddr);
5294
5295 cciss_free_cmd_pool(h);
5296 /* Free up sg elements */
5297 for (j = 0; j < h->nr_cmds; j++)
5298 kfree(h->scatter_list[j]);
5299 kfree(h->scatter_list);
5300 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
5301 kfree(h->blockFetchTable);
5302 if (h->reply_pool)
5303 pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64),
5304 h->reply_pool, h->reply_pool_dhandle);
5305 /*
5306 * Deliberately omit pci_disable_device(): it does something nasty to
5307 * Smart Array controllers that pci_enable_device does not undo
5308 */
5309 pci_release_regions(pdev);
5310 pci_set_drvdata(pdev, NULL);
5311 cciss_destroy_hba_sysfs_entry(h);
5312 mutex_unlock(&h->busy_shutting_down);
5313 free_hba(h);
5314}
5315
5316static struct pci_driver cciss_pci_driver = {
5317 .name = "cciss",
5318 .probe = cciss_init_one,
5319 .remove = __devexit_p(cciss_remove_one),
5320 .id_table = cciss_pci_device_id,
5321 .shutdown = cciss_shutdown,
5322};
5323
5324/*
5325 * This is it. Register the PCI driver information for the cards we control;
5326 * the OS will call our registered routines when it finds one of our cards.
5327 */
5328static int __init cciss_init(void)
5329{
5330 int err;
5331
5332 /*
5333 * The hardware requires that commands are aligned on a 64-bit
5334 * boundary. Given that we use pci_alloc_consistent() to allocate an
5335 * array of them, the size must be a multiple of 8 bytes.
5336 */
5337 BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT);
5338 printk(KERN_INFO DRIVER_NAME "\n");
5339
5340 err = bus_register(&cciss_bus_type);
5341 if (err)
5342 return err;
5343
5344 /* Start the scan thread */
5345 cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan");
5346 if (IS_ERR(cciss_scan_thread)) {
5347 err = PTR_ERR(cciss_scan_thread);
5348 goto err_bus_unregister;
5349 }
5350
5351 /* Register for our PCI devices */
5352 err = pci_register_driver(&cciss_pci_driver);
5353 if (err)
5354 goto err_thread_stop;
5355
5356 return err;
5357
5358err_thread_stop:
5359 kthread_stop(cciss_scan_thread);
5360err_bus_unregister:
5361 bus_unregister(&cciss_bus_type);
5362
5363 return err;
5364}
5365
5366static void __exit cciss_cleanup(void)
5367{
5368 int i;
5369
5370 pci_unregister_driver(&cciss_pci_driver);
5371 /* double check that all controller entries have been removed */
5372 for (i = 0; i < MAX_CTLR; i++) {
5373 if (hba[i] != NULL) {
5374 dev_warn(&hba[i]->pdev->dev,
5375 "had to remove controller\n");
5376 cciss_remove_one(hba[i]->pdev);
5377 }
5378 }
5379 kthread_stop(cciss_scan_thread);
5380 if (proc_cciss)
5381 remove_proc_entry("driver/cciss", NULL);
5382 bus_unregister(&cciss_bus_type);
5383}
5384
5385module_init(cciss_init);
5386module_exit(cciss_cleanup);
5387
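/*
 * Editor's illustration, not part of the driver and never compiled
 * (guarded by #if 0): a minimal userspace sketch of the performant-mode
 * reply ring that next_command() above consumes. The FIFO_EMPTY value
 * and the cycle-bit scheme mirror the driver; the simplified tag layout
 * and the test harness are hypothetical.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define FIFO_EMPTY	0xffffffffU
#define RING_SIZE	4		/* stands in for h->max_commands */

struct reply_ring {
	uint32_t pool[RING_SIZE];	/* h->reply_pool */
	uint32_t *head;			/* h->reply_pool_head */
	unsigned int wrap;		/* h->reply_pool_wraparound */
};

/*
 * Same idea as next_command(): an entry is live when its low bit
 * matches the current wrap phase, and the phase flips each time the
 * head pointer wraps around the ring.
 */
static uint32_t ring_next(struct reply_ring *r)
{
	uint32_t a = FIFO_EMPTY;

	if ((*r->head & 1) == r->wrap)
		a = *r->head++;
	/* Check for wraparound */
	if (r->head == r->pool + RING_SIZE) {
		r->head = r->pool;
		r->wrap ^= 1;
	}
	return a;
}

int main(void)
{
	struct reply_ring r = { .wrap = 1 };	/* spec: init phase to 1 */
	uint32_t tag;
	int i;

	r.head = r.pool;
	/* pretend the controller posted three completions, cycle bit set */
	for (i = 0; i < 3; i++)
		r.pool[i] = ((uint32_t)i << 1) | 1;
	while ((tag = ring_next(&r)) != FIFO_EMPTY)
		printf("completed tag index %u\n", tag >> 1);
	return 0;
}
#endif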