virthba.c revision dc95086172dbbad107e9ac7fd09666c824fd86e3
/* virthba.c
 *
 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#define EXPORT_SYMTAB

/* if you want to turn on some debugging of write device data or read
 * device data, define these two undefs.  You will probably want to
 * customize the code which is here since it was written assuming
 * reading and writing a specific data file df.64M.txt which is a
 * 64Megabyte file created by Art Nilson using a script I wrote called
 * cr_test_data.pl.
The data file consists of 256 byte lines of text 26 * which start with an 8 digit sequence number, a colon, and then 27 * letters after that */ 28 29#undef DBGINF 30 31#include <linux/kernel.h> 32#ifdef CONFIG_MODVERSIONS 33#include <config/modversions.h> 34#endif 35 36#include "uniklog.h" 37#include "diagnostics/appos_subsystems.h" 38#include "uisutils.h" 39#include "uisqueue.h" 40#include "uisthread.h" 41 42#include <linux/module.h> 43#include <linux/init.h> 44#include <linux/pci.h> 45#include <linux/spinlock.h> 46#include <linux/device.h> 47#include <linux/slab.h> 48#include <scsi/scsi.h> 49#include <scsi/scsi_host.h> 50#include <scsi/scsi_cmnd.h> 51#include <scsi/scsi_device.h> 52#include <asm/param.h> 53#include <linux/proc_fs.h> 54#include <linux/types.h> 55 56#include "virthba.h" 57#include "virtpci.h" 58#include "visorchipset.h" 59#include "version.h" 60#include "guestlinuxdebug.h" 61/* this is shorter than using __FILE__ (full path name) in 62 * debug/info/error messages 63 */ 64#define CURRENT_FILE_PC VIRT_HBA_PC_virthba_c 65#define __MYFILE__ "virthba.c" 66 67/* NOTE: L1_CACHE_BYTES >=128 */ 68#define DEVICE_ATTRIBUTE struct device_attribute 69 70/*****************************************************/ 71/* Forward declarations */ 72/*****************************************************/ 73static int virthba_probe(struct virtpci_dev *dev, 74 const struct pci_device_id *id); 75static void virthba_remove(struct virtpci_dev *dev); 76static int virthba_abort_handler(struct scsi_cmnd *scsicmd); 77static int virthba_bus_reset_handler(struct scsi_cmnd *scsicmd); 78static int virthba_device_reset_handler(struct scsi_cmnd *scsicmd); 79static int virthba_host_reset_handler(struct scsi_cmnd *scsicmd); 80static const char *virthba_get_info(struct Scsi_Host *shp); 81static int virthba_ioctl(struct scsi_device *dev, int cmd, void __user *arg); 82static int virthba_queue_command_lck(struct scsi_cmnd *scsicmd, 83 void (*virthba_cmnd_done)(struct scsi_cmnd *)); 84#ifdef 
DEF_SCSI_QCMD 85DEF_SCSI_QCMD(virthba_queue_command) 86#else 87#define virthba_queue_command virthba_queue_command_lck 88#endif 89static int virthba_slave_alloc(struct scsi_device *scsidev); 90static int virthba_slave_configure(struct scsi_device *scsidev); 91static void virthba_slave_destroy(struct scsi_device *scsidev); 92static int process_incoming_rsps(void *); 93static int virthba_serverup(struct virtpci_dev *virtpcidev); 94static int virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state); 95static void doDiskAddRemove(struct work_struct *work); 96static void virthba_serverdown_complete(struct work_struct *work); 97 98static ssize_t info_proc_read(struct file *file, char __user *buf, 99 size_t len, loff_t *offset); 100static ssize_t rqwu_proc_write(struct file *file, const char __user *buffer, 101 size_t count, loff_t *ppos); 102static ssize_t enable_ints_read(struct file *file, char __user *buffer, 103 size_t count, loff_t *ppos); 104static ssize_t enable_ints_write(struct file *file, const char __user *buffer, 105 size_t count, loff_t *ppos); 106 107/*****************************************************/ 108/* Globals */ 109/*****************************************************/ 110 111int rsltq_wait_usecs = 4000; /* Default 4ms */ 112static unsigned int MaxBuffLen; 113 114/* Module options */ 115char *virthba_options = "NONE"; 116 117static const struct pci_device_id virthba_id_table[] = { 118 {PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_VIRTHBA)}, 119 {0}, 120}; 121 122/* export virthba_id_table */ 123MODULE_DEVICE_TABLE(pci, virthba_id_table); 124 125static struct workqueue_struct *virthba_serverdown_workqueue; 126 127static struct virtpci_driver virthba_driver = { 128 .name = "uisvirthba", 129 .version = VERSION, 130 .vertag = NULL, 131 .build_date = __DATE__, 132 .build_time = __TIME__, 133 .id_table = virthba_id_table, 134 .probe = virthba_probe, 135 .remove = virthba_remove, 136 .resume = virthba_serverup, 137 .suspend = virthba_serverdown 
138}; 139 140/* The Send and Recive Buffers of the IO Queue may both be full */ 141#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS*2) 142#define INTERRUPT_VECTOR_MASK 0x3F 143 144struct scsipending { 145 char cmdtype; /* Type of pointer that is being stored */ 146 void *sent; /* The Data being tracked */ 147 /* struct scsi_cmnd *type for virthba_queue_command */ 148 /* struct uiscmdrsp *type for management commands */ 149}; 150 151#define VIRTHBA_ERROR_COUNT 30 152#define IOS_ERROR_THRESHOLD 1000 153struct virtdisk_info { 154 U32 valid; 155 U32 channel, id, lun; /* Disk Path */ 156 atomic_t ios_threshold; 157 atomic_t error_count; 158 struct virtdisk_info *next; 159}; 160/* Each Scsi_Host has a host_data area that contains this struct. */ 161struct virthba_info { 162 struct Scsi_Host *scsihost; 163 struct virtpci_dev *virtpcidev; 164 struct list_head dev_info_list; 165 struct chaninfo chinfo; 166 struct InterruptInfo intr; /* use recvInterrupt info to receive 167 interrupts when IOs complete */ 168 int interrupt_vector; 169 struct scsipending pending[MAX_PENDING_REQUESTS]; /* Tracks the requests 170 that have been */ 171 /* forwarded to the IOVM and haven't returned yet */ 172 unsigned int nextinsert; /* Start search for next pending 173 free slot here */ 174 spinlock_t privlock; 175 bool serverdown; 176 bool serverchangingstate; 177 unsigned long long acquire_failed_cnt; 178 unsigned long long interrupts_rcvd; 179 unsigned long long interrupts_notme; 180 unsigned long long interrupts_disabled; 181 struct work_struct serverdown_completion; 182 U64 *flags_addr; 183 atomic_t interrupt_rcvd; 184 wait_queue_head_t rsp_queue; 185 struct virtdisk_info head; 186}; 187 188/* Work Data for DARWorkQ */ 189struct diskaddremove { 190 U8 add; /* 0-remove, 1-add */ 191 struct Scsi_Host *shost; /* Scsi Host for this virthba instance */ 192 U32 channel, id, lun; /* Disk Path */ 193 struct diskaddremove *next; 194}; 195 196#define virtpci_dev_to_virthba_virthba_get_info(d) \ 197 
container_of(d, struct virthba_info, virtpcidev) 198 199static DEVICE_ATTRIBUTE *virthba_shost_attrs[]; 200static struct scsi_host_template virthba_driver_template = { 201 .name = "Unisys Virtual HBA", 202 .proc_name = "uisvirthba", 203 .info = virthba_get_info, 204 .ioctl = virthba_ioctl, 205 .queuecommand = virthba_queue_command, 206 .eh_abort_handler = virthba_abort_handler, 207 .eh_device_reset_handler = virthba_device_reset_handler, 208 .eh_bus_reset_handler = virthba_bus_reset_handler, 209 .eh_host_reset_handler = virthba_host_reset_handler, 210 .shost_attrs = virthba_shost_attrs, 211 212#define VIRTHBA_MAX_CMNDS 128 213 .can_queue = VIRTHBA_MAX_CMNDS, 214 .sg_tablesize = 64, /* largest number of address/length pairs */ 215 .this_id = -1, 216 .slave_alloc = virthba_slave_alloc, 217 .slave_configure = virthba_slave_configure, 218 .slave_destroy = virthba_slave_destroy, 219 .use_clustering = ENABLE_CLUSTERING, 220}; 221 222struct virthba_devices_open { 223 struct virthba_info *virthbainfo; 224}; 225 226static const struct file_operations proc_info_fops = { 227 .read = info_proc_read, 228}; 229 230static const struct file_operations proc_rqwu_fops = { 231 .write = rqwu_proc_write, 232}; 233 234static const struct file_operations proc_enable_ints_fops = { 235 .read = enable_ints_read, 236 .write = enable_ints_write, 237}; 238 239 240#define VIRTHBASOPENMAX 1 241/* array of open devices maintained by open() and close(); */ 242static struct virthba_devices_open VirtHbasOpen[VIRTHBASOPENMAX]; 243static struct proc_dir_entry *virthba_proc_dir; 244static struct proc_dir_entry *info_proc_entry; 245static struct proc_dir_entry *rqwaitus_proc_entry; 246static struct proc_dir_entry *enable_ints_proc_entry; 247#define INFO_PROC_ENTRY_FN "info" 248#define ENABLE_INTS_ENTRY_FN "enable_ints" 249#define RQWU_PROC_ENTRY_FN "rqwait_usecs" 250#define DIR_PROC_ENTRY "virthba" 251 252/*****************************************************/ 253/* Local Functions */ 
254/*****************************************************/ 255static int 256add_scsipending_entry(struct virthba_info *vhbainfo, char cmdtype, void *new) 257{ 258 unsigned long flags; 259 int insert_location; 260 261 spin_lock_irqsave(&vhbainfo->privlock, flags); 262 insert_location = vhbainfo->nextinsert; 263 while (vhbainfo->pending[insert_location].sent != NULL) { 264 insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS; 265 if (insert_location == (int) vhbainfo->nextinsert) { 266 LOGERR("Queue should be full. insert_location<<%d>> Unable to find open slot for pending commands.\n", 267 insert_location); 268 spin_unlock_irqrestore(&vhbainfo->privlock, flags); 269 return -1; 270 } 271 } 272 273 vhbainfo->pending[insert_location].cmdtype = cmdtype; 274 vhbainfo->pending[insert_location].sent = new; 275 vhbainfo->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS; 276 spin_unlock_irqrestore(&vhbainfo->privlock, flags); 277 278 return insert_location; 279} 280 281static unsigned int 282add_scsipending_entry_with_wait(struct virthba_info *vhbainfo, char cmdtype, 283 void *new) 284{ 285 int insert_location = add_scsipending_entry(vhbainfo, cmdtype, new); 286 287 while (insert_location == -1) { 288 LOGERR("Failed to find empty queue slot. Waiting to try again\n"); 289 set_current_state(TASK_INTERRUPTIBLE); 290 schedule_timeout(msecs_to_jiffies(10)); 291 insert_location = add_scsipending_entry(vhbainfo, cmdtype, new); 292 } 293 294 return (unsigned int) insert_location; 295} 296 297static void * 298del_scsipending_entry(struct virthba_info *vhbainfo, uintptr_t del) 299{ 300 unsigned long flags; 301 void *sent = NULL; 302 303 if (del >= MAX_PENDING_REQUESTS) { 304 LOGERR("Invalid queue position <<%lu>> given to delete. 
MAX_PENDING_REQUESTS <<%d>>\n", 305 (unsigned long) del, MAX_PENDING_REQUESTS); 306 } else { 307 spin_lock_irqsave(&vhbainfo->privlock, flags); 308 309 if (vhbainfo->pending[del].sent == NULL) 310 LOGERR("Deleting already cleared queue entry at <<%lu>>.\n", 311 (unsigned long) del); 312 313 sent = vhbainfo->pending[del].sent; 314 315 vhbainfo->pending[del].cmdtype = 0; 316 vhbainfo->pending[del].sent = NULL; 317 spin_unlock_irqrestore(&vhbainfo->privlock, flags); 318 } 319 320 return sent; 321} 322 323/* DARWorkQ (Disk Add/Remove) */ 324static struct work_struct DARWorkQ; 325struct diskaddremove *DARWorkQHead = NULL; 326spinlock_t DARWorkQLock; 327unsigned short DARWorkQSched = 0; 328#define QUEUE_DISKADDREMOVE(dar) { \ 329 spin_lock_irqsave(&DARWorkQLock, flags); \ 330 if (!DARWorkQHead) { \ 331 DARWorkQHead = dar; \ 332 dar->next = NULL; \ 333 } \ 334 else { \ 335 dar->next = DARWorkQHead; \ 336 DARWorkQHead = dar; \ 337 } \ 338 if (!DARWorkQSched) { \ 339 schedule_work(&DARWorkQ); \ 340 DARWorkQSched = 1; \ 341 } \ 342 spin_unlock_irqrestore(&DARWorkQLock, flags); \ 343} 344 345static inline void 346SendDiskAddRemove(struct diskaddremove *dar) 347{ 348 struct scsi_device *sdev; 349 int error; 350 351 sdev = scsi_device_lookup(dar->shost, dar->channel, dar->id, dar->lun); 352 if (sdev) { 353 if (!(dar->add)) 354 scsi_remove_device(sdev); 355 } else if (dar->add) { 356 error = 357 scsi_add_device(dar->shost, dar->channel, dar->id, 358 dar->lun); 359 if (error) 360 LOGERR("Failed scsi_add_device: host_no=%d[chan=%d:id=%d:lun=%d]\n", 361 dar->shost->host_no, dar->channel, dar->id, 362 dar->lun); 363 } else 364 LOGERR("Failed scsi_device_lookup:[chan=%d:id=%d:lun=%d]\n", 365 dar->channel, dar->id, dar->lun); 366 kfree(dar); 367} 368 369/*****************************************************/ 370/* DARWorkQ Handler Thread */ 371/*****************************************************/ 372static void 373doDiskAddRemove(struct work_struct *work) 374{ 375 struct 
diskaddremove *dar; 376 struct diskaddremove *tmphead; 377 int i = 0; 378 unsigned long flags; 379 380 spin_lock_irqsave(&DARWorkQLock, flags); 381 tmphead = DARWorkQHead; 382 DARWorkQHead = NULL; 383 DARWorkQSched = 0; 384 spin_unlock_irqrestore(&DARWorkQLock, flags); 385 while (tmphead) { 386 dar = tmphead; 387 tmphead = dar->next; 388 SendDiskAddRemove(dar); 389 i++; 390 } 391} 392 393/*****************************************************/ 394/* Routine to add entry to DARWorkQ */ 395/*****************************************************/ 396static void 397process_disk_notify(struct Scsi_Host *shost, struct uiscmdrsp *cmdrsp) 398{ 399 struct diskaddremove *dar; 400 unsigned long flags; 401 402 dar = kmalloc(sizeof(struct diskaddremove), GFP_ATOMIC); 403 if (dar) { 404 memset(dar, 0, sizeof(struct diskaddremove)); 405 dar->add = cmdrsp->disknotify.add; 406 dar->shost = shost; 407 dar->channel = cmdrsp->disknotify.channel; 408 dar->id = cmdrsp->disknotify.id; 409 dar->lun = cmdrsp->disknotify.lun; 410 QUEUE_DISKADDREMOVE(dar); 411 } else { 412 LOGERR("kmalloc failed for dar. 
host_no=%d[chan=%d:id=%d:lun=%d]\n", 413 shost->host_no, cmdrsp->disknotify.channel, 414 cmdrsp->disknotify.id, cmdrsp->disknotify.lun); 415 } 416} 417 418/*****************************************************/ 419/* Probe Remove Functions */ 420/*****************************************************/ 421irqreturn_t 422virthba_ISR(int irq, void *dev_id) 423{ 424 struct virthba_info *virthbainfo = (struct virthba_info *) dev_id; 425 pCHANNEL_HEADER pChannelHeader; 426 pSIGNAL_QUEUE_HEADER pqhdr; 427 U64 mask; 428 unsigned long long rc1; 429 430 if (virthbainfo == NULL) 431 return IRQ_NONE; 432 virthbainfo->interrupts_rcvd++; 433 pChannelHeader = virthbainfo->chinfo.queueinfo->chan; 434 if (((pChannelHeader->Features 435 & ULTRA_IO_IOVM_IS_OK_WITH_DRIVER_DISABLING_INTS) != 0) 436 && ((pChannelHeader->Features & ULTRA_IO_DRIVER_DISABLES_INTS) != 437 0)) { 438 virthbainfo->interrupts_disabled++; 439 mask = ~ULTRA_CHANNEL_ENABLE_INTS; 440 rc1 = uisqueue_InterlockedAnd(virthbainfo->flags_addr, mask); 441 } 442 if (SignalQueueIsEmpty(pChannelHeader, IOCHAN_FROM_IOPART)) { 443 virthbainfo->interrupts_notme++; 444 return IRQ_NONE; 445 } 446 pqhdr = (pSIGNAL_QUEUE_HEADER) ((char *) pChannelHeader + 447 pChannelHeader->oChannelSpace) + 448 IOCHAN_FROM_IOPART; 449 pqhdr->NumInterruptsReceived++; 450 atomic_set(&virthbainfo->interrupt_rcvd, 1); 451 wake_up_interruptible(&virthbainfo->rsp_queue); 452 return IRQ_HANDLED; 453} 454 455static int 456virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id) 457{ 458 int error; 459 struct Scsi_Host *scsihost; 460 struct virthba_info *virthbainfo; 461 int rsp; 462 int i; 463 irq_handler_t handler = virthba_ISR; 464 pCHANNEL_HEADER pChannelHeader; 465 pSIGNAL_QUEUE_HEADER pqhdr; 466 U64 mask; 467 468 LOGVER("entering virthba_probe...\n"); 469 LOGVER("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo, 470 virtpcidev->deviceNo); 471 472 LOGINF("entering virthba_probe...\n"); 473 LOGINF("virtpcidev 
busNo<<%d>>devNo<<%d>>", virtpcidev->busNo, 474 virtpcidev->deviceNo); 475 POSTCODE_LINUX_2(VHBA_PROBE_ENTRY_PC, POSTCODE_SEVERITY_INFO); 476 /* call scsi_host_alloc to register a scsi host adapter 477 * instance - this virthba that has just been created is an 478 * instance of a scsi host adapter. This scsi_host_alloc 479 * function allocates a new Scsi_Host struct & performs basic 480 * initializatoin. The host is not published to the scsi 481 * midlayer until scsi_add_host is called. 482 */ 483 DBGINF("calling scsi_host_alloc.\n"); 484 485 /* arg 2 passed in length of extra space we want allocated 486 * with scsi_host struct for our own use scsi_host_alloc 487 * assign host_no 488 */ 489 scsihost = scsi_host_alloc(&virthba_driver_template, 490 sizeof(struct virthba_info)); 491 if (scsihost == NULL) 492 return -ENODEV; 493 494 DBGINF("scsihost: 0x%p, scsihost->this_id: %d, host_no: %d.\n", 495 scsihost, scsihost->this_id, scsihost->host_no); 496 497 scsihost->this_id = UIS_MAGIC_VHBA; 498 /* linux treats max-channel differently than max-id & max-lun. 499 * In the latter cases, those two values result in 0 to max-1 500 * (inclusive) being scanned. But in the case of channels, the 501 * scan is 0 to max (inclusive); so we will subtract one from 502 * the max-channel value. 
503 */ 504 LOGINF("virtpcidev->scsi.max.max_channel=%u, max_id=%u, max_lun=%u, cmd_per_lun=%u, max_io_size=%u\n", 505 (unsigned) virtpcidev->scsi.max.max_channel - 1, 506 (unsigned) virtpcidev->scsi.max.max_id, 507 (unsigned) virtpcidev->scsi.max.max_lun, 508 (unsigned) virtpcidev->scsi.max.cmd_per_lun, 509 (unsigned) virtpcidev->scsi.max.max_io_size); 510 scsihost->max_channel = (unsigned) virtpcidev->scsi.max.max_channel; 511 scsihost->max_id = (unsigned) virtpcidev->scsi.max.max_id; 512 scsihost->max_lun = (unsigned) virtpcidev->scsi.max.max_lun; 513 scsihost->cmd_per_lun = (unsigned) virtpcidev->scsi.max.cmd_per_lun; 514 scsihost->max_sectors = 515 (unsigned short) (virtpcidev->scsi.max.max_io_size >> 9); 516 scsihost->sg_tablesize = 517 (unsigned short) (virtpcidev->scsi.max.max_io_size / PAGE_SIZE); 518 if (scsihost->sg_tablesize > MAX_PHYS_INFO) 519 scsihost->sg_tablesize = MAX_PHYS_INFO; 520 LOGINF("scsihost->max_channel=%u, max_id=%u, max_lun=%u, cmd_per_lun=%u, max_sectors=%hu, sg_tablesize=%hu\n", 521 scsihost->max_channel, scsihost->max_id, scsihost->max_lun, 522 scsihost->cmd_per_lun, scsihost->max_sectors, 523 scsihost->sg_tablesize); 524 LOGINF("scsihost->can_queue=%u, scsihost->cmd_per_lun=%u, max_sectors=%hu, sg_tablesize=%hu\n", 525 scsihost->can_queue, scsihost->cmd_per_lun, scsihost->max_sectors, 526 scsihost->sg_tablesize); 527 528 DBGINF("calling scsi_add_host\n"); 529 530 /* this creates "host%d" in sysfs. If 2nd argument is NULL, 531 * then this generic /sys/devices/platform/host? device is 532 * created and /sys/scsi_host/host? -> 533 * /sys/devices/platform/host? If 2nd argument is not NULL, 534 * then this generic /sys/devices/<path>/host? is created and 535 * host? points to that device instead. 
536 */ 537 error = scsi_add_host(scsihost, &virtpcidev->generic_dev); 538 if (error) { 539 LOGERR("scsi_add_host ****FAILED 0x%x TBD - RECOVER\n", error); 540 POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR); 541 /* decr refcount on scsihost which was incremented by 542 * scsi_add_host so the scsi_host gets deleted 543 */ 544 scsi_host_put(scsihost); 545 return -ENODEV; 546 } 547 548 virthbainfo = (struct virthba_info *) scsihost->hostdata; 549 memset(virthbainfo, 0, sizeof(struct virthba_info)); 550 for (i = 0; i < VIRTHBASOPENMAX; i++) { 551 if (VirtHbasOpen[i].virthbainfo == NULL) { 552 VirtHbasOpen[i].virthbainfo = virthbainfo; 553 break; 554 } 555 } 556 virthbainfo->interrupt_vector = -1; 557 virthbainfo->chinfo.queueinfo = &virtpcidev->queueinfo; 558 virthbainfo->virtpcidev = virtpcidev; 559 spin_lock_init(&virthbainfo->chinfo.insertlock); 560 561 DBGINF("generic_dev: 0x%p, queueinfo: 0x%p.\n", 562 &virtpcidev->generic_dev, &virtpcidev->queueinfo); 563 564 init_waitqueue_head(&virthbainfo->rsp_queue); 565 spin_lock_init(&virthbainfo->privlock); 566 memset(&virthbainfo->pending, 0, sizeof(virthbainfo->pending)); 567 virthbainfo->serverdown = false; 568 virthbainfo->serverchangingstate = false; 569 570 virthbainfo->intr = virtpcidev->intr; 571 /* save of host within virthba_info */ 572 virthbainfo->scsihost = scsihost; 573 574 /* save of host within virtpci_dev */ 575 virtpcidev->scsi.scsihost = scsihost; 576 577 /* Setup workqueue for serverdown messages */ 578 INIT_WORK(&virthbainfo->serverdown_completion, 579 virthba_serverdown_complete); 580 581 virthbainfo->chinfo.queueinfo->chan->Features |= 582 ULTRA_IO_CHANNEL_IS_POLLING; 583 /* start thread that will receive scsicmnd responses */ 584 DBGINF("starting rsp thread -- queueinfo: 0x%p, threadinfo: 0x%p.\n", 585 virthbainfo->chinfo.queueinfo, &virthbainfo->chinfo.threadinfo); 586 587 pChannelHeader = virthbainfo->chinfo.queueinfo->chan; 588 pqhdr = (pSIGNAL_QUEUE_HEADER) ((char *) 
pChannelHeader + 589 pChannelHeader->oChannelSpace) + 590 IOCHAN_FROM_IOPART; 591 virthbainfo->flags_addr = &pqhdr->FeatureFlags; 592 593 if (!uisthread_start(&virthbainfo->chinfo.threadinfo, 594 process_incoming_rsps, 595 virthbainfo, "vhba_incoming")) { 596 LOGERR("uisthread_start rsp ****FAILED\n"); 597 /* decr refcount on scsihost which was incremented by 598 * scsi_add_host so the scsi_host gets deleted 599 */ 600 POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR); 601 scsi_host_put(scsihost); 602 return -ENODEV; 603 } 604 LOGINF("sendInterruptHandle=0x%16llX", 605 virthbainfo->intr.sendInterruptHandle); 606 LOGINF("recvInterruptHandle=0x%16llX", 607 virthbainfo->intr.recvInterruptHandle); 608 LOGINF("recvInterruptVector=0x%8X", 609 virthbainfo->intr.recvInterruptVector); 610 LOGINF("recvInterruptShared=0x%2X", 611 virthbainfo->intr.recvInterruptShared); 612 LOGINF("scsihost.hostt->name=%s", scsihost->hostt->name); 613 virthbainfo->interrupt_vector = 614 virthbainfo->intr.recvInterruptHandle & INTERRUPT_VECTOR_MASK; 615 rsp = request_irq(virthbainfo->interrupt_vector, handler, IRQF_SHARED, 616 scsihost->hostt->name, virthbainfo); 617 if (rsp != 0) { 618 LOGERR("request_irq(%d) uislib_virthba_ISR request failed with rsp=%d\n", 619 virthbainfo->interrupt_vector, rsp); 620 virthbainfo->interrupt_vector = -1; 621 POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR); 622 } else { 623 U64 *Features_addr = 624 &virthbainfo->chinfo.queueinfo->chan->Features; 625 LOGERR("request_irq(%d) uislib_virthba_ISR request succeeded\n", 626 virthbainfo->interrupt_vector); 627 mask = ~(ULTRA_IO_CHANNEL_IS_POLLING | 628 ULTRA_IO_DRIVER_DISABLES_INTS); 629 uisqueue_InterlockedAnd(Features_addr, mask); 630 mask = ULTRA_IO_DRIVER_ENABLES_INTS; 631 uisqueue_InterlockedOr(Features_addr, mask); 632 rsltq_wait_usecs = 4000000; 633 } 634 635 DBGINF("calling scsi_scan_host.\n"); 636 scsi_scan_host(scsihost); 637 DBGINF("return from scsi_scan_host.\n"); 638 639 
LOGINF("virthba added scsihost:0x%p\n", scsihost); 640 POSTCODE_LINUX_2(VHBA_PROBE_EXIT_PC, POSTCODE_SEVERITY_INFO); 641 return 0; 642} 643 644static void 645virthba_remove(struct virtpci_dev *virtpcidev) 646{ 647 struct virthba_info *virthbainfo; 648 struct Scsi_Host *scsihost = 649 (struct Scsi_Host *) virtpcidev->scsi.scsihost; 650 651 LOGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo, 652 virtpcidev->deviceNo); 653 virthbainfo = (struct virthba_info *) scsihost->hostdata; 654 if (virthbainfo->interrupt_vector != -1) 655 free_irq(virthbainfo->interrupt_vector, virthbainfo); 656 LOGINF("Removing virtpcidev: 0x%p, virthbainfo: 0x%p\n", virtpcidev, 657 virthbainfo); 658 659 DBGINF("removing scsihost: 0x%p, scsihost->this_id: %d\n", scsihost, 660 scsihost->this_id); 661 scsi_remove_host(scsihost); 662 663 DBGINF("stopping thread.\n"); 664 uisthread_stop(&virthbainfo->chinfo.threadinfo); 665 666 DBGINF("calling scsi_host_put\n"); 667 668 /* decr refcount on scsihost which was incremented by 669 * scsi_add_host so the scsi_host gets deleted 670 */ 671 scsi_host_put(scsihost); 672 LOGINF("virthba removed scsi_host.\n"); 673} 674 675static int 676forward_vdiskmgmt_command(VDISK_MGMT_TYPES vdiskcmdtype, 677 struct Scsi_Host *scsihost, 678 struct uisscsi_dest *vdest) 679{ 680 struct uiscmdrsp *cmdrsp; 681 struct virthba_info *virthbainfo = 682 (struct virthba_info *) scsihost->hostdata; 683 int notifyresult = 0xffff; 684 wait_queue_head_t notifyevent; 685 686 LOGINF("vDiskMgmt:%d %d:%d:%d\n", vdiskcmdtype, 687 vdest->channel, vdest->id, vdest->lun); 688 689 if (virthbainfo->serverdown || virthbainfo->serverchangingstate) { 690 DBGINF("Server is down/changing state. 
Returning Failure.\n"); 691 return FAILED; 692 } 693 694 ALLOC_CMDRSP(cmdrsp); 695 if (cmdrsp == NULL) { 696 LOGERR("kmalloc of cmdrsp failed.\n"); 697 return FAILED; /* reject */ 698 } 699 700 init_waitqueue_head(¬ifyevent); 701 702 /* issue VDISK_MGMT_CMD 703 * set type to command - as opposed to task mgmt 704 */ 705 cmdrsp->cmdtype = CMD_VDISKMGMT_TYPE; 706 /* specify the event that has to be triggered when this cmd is 707 * complete 708 */ 709 cmdrsp->vdiskmgmt.notify = (void *) ¬ifyevent; 710 cmdrsp->vdiskmgmt.notifyresult = (void *) ¬ifyresult; 711 712 /* save destination */ 713 cmdrsp->vdiskmgmt.vdisktype = vdiskcmdtype; 714 cmdrsp->vdiskmgmt.vdest.channel = vdest->channel; 715 cmdrsp->vdiskmgmt.vdest.id = vdest->id; 716 cmdrsp->vdiskmgmt.vdest.lun = vdest->lun; 717 cmdrsp->vdiskmgmt.scsicmd = 718 (void *) (uintptr_t) 719 add_scsipending_entry_with_wait(virthbainfo, CMD_VDISKMGMT_TYPE, 720 (void *) cmdrsp); 721 722 uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo, 723 cmdrsp, IOCHAN_TO_IOPART, 724 &virthbainfo->chinfo.insertlock, 725 DONT_ISSUE_INTERRUPT, (U64) NULL, 726 OK_TO_WAIT, "vhba"); 727 LOGINF("VdiskMgmt waiting on event notifyevent=0x%p\n", 728 cmdrsp->scsitaskmgmt.notify); 729 wait_event(notifyevent, notifyresult != 0xffff); 730 LOGINF("VdiskMgmt complete; result:%d\n", cmdrsp->vdiskmgmt.result); 731 kfree(cmdrsp); 732 return SUCCESS; 733} 734 735/*****************************************************/ 736/* Scsi Host support functions */ 737/*****************************************************/ 738 739static int 740forward_taskmgmt_command(TASK_MGMT_TYPES tasktype, struct scsi_device *scsidev) 741{ 742 struct uiscmdrsp *cmdrsp; 743 struct virthba_info *virthbainfo = 744 (struct virthba_info *) scsidev->host->hostdata; 745 int notifyresult = 0xffff; 746 wait_queue_head_t notifyevent; 747 748 LOGINF("TaskMgmt:%d %d:%d:%d\n", tasktype, 749 scsidev->channel, scsidev->id, scsidev->lun); 750 751 if (virthbainfo->serverdown || 
virthbainfo->serverchangingstate) { 752 DBGINF("Server is down/changing state. Returning Failure.\n"); 753 return FAILED; 754 } 755 756 ALLOC_CMDRSP(cmdrsp); 757 if (cmdrsp == NULL) { 758 LOGERR("kmalloc of cmdrsp failed.\n"); 759 return FAILED; /* reject */ 760 } 761 762 init_waitqueue_head(¬ifyevent); 763 764 /* issue TASK_MGMT_ABORT_TASK */ 765 /* set type to command - as opposed to task mgmt */ 766 cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE; 767 /* specify the event that has to be triggered when this */ 768 /* cmd is complete */ 769 cmdrsp->scsitaskmgmt.notify = (void *) ¬ifyevent; 770 cmdrsp->scsitaskmgmt.notifyresult = (void *) ¬ifyresult; 771 772 /* save destination */ 773 cmdrsp->scsitaskmgmt.tasktype = tasktype; 774 cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel; 775 cmdrsp->scsitaskmgmt.vdest.id = scsidev->id; 776 cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun; 777 cmdrsp->scsitaskmgmt.scsicmd = 778 (void *) (uintptr_t) 779 add_scsipending_entry_with_wait(virthbainfo, 780 CMD_SCSITASKMGMT_TYPE, 781 (void *) cmdrsp); 782 783 uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo, 784 cmdrsp, IOCHAN_TO_IOPART, 785 &virthbainfo->chinfo.insertlock, 786 DONT_ISSUE_INTERRUPT, (U64) NULL, 787 OK_TO_WAIT, "vhba"); 788 LOGINF("TaskMgmt waiting on event notifyevent=0x%p\n", 789 cmdrsp->scsitaskmgmt.notify); 790 wait_event(notifyevent, notifyresult != 0xffff); 791 LOGINF("TaskMgmt complete; result:%d\n", cmdrsp->scsitaskmgmt.result); 792 kfree(cmdrsp); 793 return SUCCESS; 794} 795 796/* The abort handler returns SUCCESS if it has succeeded to make LLDD 797 * and all related hardware forget about the scmd. 
798 */ 799static int 800virthba_abort_handler(struct scsi_cmnd *scsicmd) 801{ 802 /* issue TASK_MGMT_ABORT_TASK */ 803 struct scsi_device *scsidev; 804 struct virtdisk_info *vdisk; 805 806 scsidev = scsicmd->device; 807 for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head; 808 vdisk->next; vdisk = vdisk->next) { 809 if ((scsidev->channel == vdisk->channel) 810 && (scsidev->id == vdisk->id) 811 && (scsidev->lun == vdisk->lun)) { 812 if (atomic_read(&vdisk->error_count) < 813 VIRTHBA_ERROR_COUNT) { 814 atomic_inc(&vdisk->error_count); 815 POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC, 816 POSTCODE_SEVERITY_INFO); 817 } else 818 atomic_set(&vdisk->ios_threshold, 819 IOS_ERROR_THRESHOLD); 820 } 821 } 822 return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd->device); 823} 824 825static int 826virthba_bus_reset_handler(struct scsi_cmnd *scsicmd) 827{ 828 /* issue TASK_MGMT_TARGET_RESET for each target on the bus */ 829 struct scsi_device *scsidev; 830 struct virtdisk_info *vdisk; 831 832 scsidev = scsicmd->device; 833 for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head; 834 vdisk->next; vdisk = vdisk->next) { 835 if ((scsidev->channel == vdisk->channel) 836 && (scsidev->id == vdisk->id) 837 && (scsidev->lun == vdisk->lun)) { 838 if (atomic_read(&vdisk->error_count) < 839 VIRTHBA_ERROR_COUNT) { 840 atomic_inc(&vdisk->error_count); 841 POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC, 842 POSTCODE_SEVERITY_INFO); 843 } else 844 atomic_set(&vdisk->ios_threshold, 845 IOS_ERROR_THRESHOLD); 846 } 847 } 848 return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd->device); 849} 850 851static int 852virthba_device_reset_handler(struct scsi_cmnd *scsicmd) 853{ 854 /* issue TASK_MGMT_LUN_RESET */ 855 struct scsi_device *scsidev; 856 struct virtdisk_info *vdisk; 857 858 scsidev = scsicmd->device; 859 for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head; 860 vdisk->next; vdisk = vdisk->next) { 861 if ((scsidev->channel == 
vdisk->channel) 862 && (scsidev->id == vdisk->id) 863 && (scsidev->lun == vdisk->lun)) { 864 if (atomic_read(&vdisk->error_count) < 865 VIRTHBA_ERROR_COUNT) { 866 atomic_inc(&vdisk->error_count); 867 POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC, 868 POSTCODE_SEVERITY_INFO); 869 } else 870 atomic_set(&vdisk->ios_threshold, 871 IOS_ERROR_THRESHOLD); 872 } 873 } 874 return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd->device); 875} 876 877static int 878virthba_host_reset_handler(struct scsi_cmnd *scsicmd) 879{ 880 /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */ 881 LOGERR("virthba_host_reset_handler Not yet implemented\n"); 882 return SUCCESS; 883} 884 885static char virthba_get_info_str[256]; 886 887static const char * 888virthba_get_info(struct Scsi_Host *shp) 889{ 890 /* Return version string */ 891 sprintf(virthba_get_info_str, "virthba, version %s\n", VIRTHBA_VERSION); 892 return virthba_get_info_str; 893} 894 895static int 896virthba_ioctl(struct scsi_device *dev, int cmd, void __user *arg) 897{ 898 DBGINF("In virthba_ioctl: ioctl: cmd=0x%x\n", cmd); 899 return -EINVAL; 900} 901 902/* This returns SCSI_MLQUEUE_DEVICE_BUSY if the signal queue to IOpart 903 * is full. 904 */ 905static int 906virthba_queue_command_lck(struct scsi_cmnd *scsicmd, 907 void (*virthba_cmnd_done)(struct scsi_cmnd *)) 908{ 909 struct scsi_device *scsidev = scsicmd->device; 910 int insert_location; 911 unsigned char op; 912 unsigned char *cdb = scsicmd->cmnd; 913 struct Scsi_Host *scsihost = scsidev->host; 914 struct uiscmdrsp *cmdrsp; 915 unsigned int i; 916 struct virthba_info *virthbainfo = 917 (struct virthba_info *) scsihost->hostdata; 918 struct scatterlist *sg = NULL; 919 struct scatterlist *sgl = NULL; 920 int sg_failed = 0; 921 922 if (virthbainfo->serverdown || virthbainfo->serverchangingstate) { 923 DBGINF("Server is down/changing state. 
Returning SCSI_MLQUEUE_DEVICE_BUSY.\n"); 924 return SCSI_MLQUEUE_DEVICE_BUSY; 925 } 926 927 ALLOC_CMDRSP(cmdrsp); 928 if (cmdrsp == NULL) { 929 LOGERR("kmalloc of cmdrsp failed.\n"); 930 return 1; /* reject the command */ 931 } 932 933 /* now saving everything we need from scsi_cmd into cmdrsp 934 * before we queue cmdrsp set type to command - as opposed to 935 * task mgmt 936 */ 937 cmdrsp->cmdtype = CMD_SCSI_TYPE; 938 /* save the pending insertion location. Deletion from pending 939 * will return the scsicmd pointer for completion 940 */ 941 insert_location = 942 add_scsipending_entry(virthbainfo, CMD_SCSI_TYPE, (void *) scsicmd); 943 if (insert_location != -1) { 944 cmdrsp->scsi.scsicmd = (void *) (uintptr_t) insert_location; 945 } else { 946 LOGERR("Queue is full. Returning busy.\n"); 947 kfree(cmdrsp); 948 return SCSI_MLQUEUE_DEVICE_BUSY; 949 } 950 /* save done function that we have call when cmd is complete */ 951 scsicmd->scsi_done = virthba_cmnd_done; 952 /* save destination */ 953 cmdrsp->scsi.vdest.channel = scsidev->channel; 954 cmdrsp->scsi.vdest.id = scsidev->id; 955 cmdrsp->scsi.vdest.lun = scsidev->lun; 956 /* save datadir */ 957 cmdrsp->scsi.data_dir = scsicmd->sc_data_direction; 958 memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE); 959 960 cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd); 961 962 /* keep track of the max buffer length so far. */ 963 if (cmdrsp->scsi.bufflen > MaxBuffLen) 964 MaxBuffLen = cmdrsp->scsi.bufflen; 965 966 if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO) { 967 LOGERR("scsicmd use_sg:%d greater than MAX:%d\n", 968 scsi_sg_count(scsicmd), MAX_PHYS_INFO); 969 del_scsipending_entry(virthbainfo, (uintptr_t) insert_location); 970 kfree(cmdrsp); 971 return 1; /* reject the command */ 972 } 973 974 /* This is what we USED to do when we assumed we were running */ 975 /* uissd & virthba on the same Linux system. */ 976 /* cmdrsp->scsi.buffer = scsicmd->request_buffer; */ 977 /* The following code does NOT make that assumption. 
*/ 978 /* convert buffer to phys information */ 979 if (scsi_sg_count(scsicmd) == 0) { 980 if (scsi_bufflen(scsicmd) > 0) { 981 LOGERR("**** FAILED No scatter list for bufflen > 0\n"); 982 BUG_ON(scsi_sg_count(scsicmd) == 0); 983 } 984 DBGINF("No sg; buffer:0x%p bufflen:%d\n", 985 scsi_sglist(scsicmd), scsi_bufflen(scsicmd)); 986 } else { 987 /* buffer is scatterlist - copy it out */ 988 sgl = scsi_sglist(scsicmd); 989 990 for_each_sg(sgl, sg, scsi_sg_count(scsicmd), i) { 991 992 cmdrsp->scsi.gpi_list[i].address = sg_phys(sg); 993 cmdrsp->scsi.gpi_list[i].length = sg->length; 994 if ((i != 0) && (sg->offset != 0)) 995 LOGINF("Offset on a sg_entry other than zero =<<%d>>.\n", 996 sg->offset); 997 } 998 999 if (sg_failed) { 1000 LOGERR("Start sg_list dump (entries %d, bufflen %d)...\n", 1001 scsi_sg_count(scsicmd), cmdrsp->scsi.bufflen); 1002 for_each_sg(sgl, sg, scsi_sg_count(scsicmd), i) { 1003 LOGERR(" Entry(%d): page->[0x%p], phys->[0x%Lx], off(%d), len(%d)\n", 1004 i, sg_page(sg), 1005 (unsigned long long) sg_phys(sg), 1006 sg->offset, sg->length); 1007 } 1008 LOGERR("Done sg_list dump.\n"); 1009 /* BUG(); ***** For now, let it fail in uissd 1010 * if it is a problem, as it might just 1011 * work 1012 */ 1013 } 1014 1015 cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd); 1016 } 1017 1018 op = cdb[0]; 1019 i = uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo, 1020 cmdrsp, IOCHAN_TO_IOPART, 1021 &virthbainfo->chinfo. 1022 insertlock, 1023 DONT_ISSUE_INTERRUPT, 1024 (U64) NULL, DONT_WAIT, "vhba"); 1025 if (i == 0) { 1026 /* queue must be full - and we said don't wait - return busy */ 1027 LOGERR("uisqueue_put_cmdrsp_with_lock ****FAILED\n"); 1028 kfree(cmdrsp); 1029 del_scsipending_entry(virthbainfo, (uintptr_t) insert_location); 1030 return SCSI_MLQUEUE_DEVICE_BUSY; 1031 } 1032 1033 /* we're done with cmdrsp space - data from it has been copied 1034 * into channel - free it now. 
1035 */ 1036 kfree(cmdrsp); 1037 return 0; /* non-zero implies host/device is busy */ 1038} 1039 1040static int 1041virthba_slave_alloc(struct scsi_device *scsidev) 1042{ 1043 /* this called by the midlayer before scan for new devices - 1044 * LLD can alloc any struc & do init if needed. 1045 */ 1046 struct virtdisk_info *vdisk; 1047 struct virtdisk_info *tmpvdisk; 1048 struct virthba_info *virthbainfo; 1049 struct Scsi_Host *scsihost = (struct Scsi_Host *) scsidev->host; 1050 1051 virthbainfo = (struct virthba_info *) scsihost->hostdata; 1052 if (!virthbainfo) { 1053 LOGERR("Could not find virthba_info for scsihost\n"); 1054 return 0; /* even though we errored, treat as success */ 1055 } 1056 for (vdisk = &virthbainfo->head; vdisk->next; vdisk = vdisk->next) { 1057 if (vdisk->next->valid && 1058 (vdisk->next->channel == scsidev->channel) && 1059 (vdisk->next->id == scsidev->id) && 1060 (vdisk->next->lun == scsidev->lun)) 1061 return 0; 1062 } 1063 tmpvdisk = kmalloc(sizeof(struct virtdisk_info), GFP_ATOMIC); 1064 if (!tmpvdisk) { /* error allocating */ 1065 LOGERR("Could not allocate memory for disk\n"); 1066 return 0; 1067 } 1068 memset(tmpvdisk, 0, sizeof(struct virtdisk_info)); 1069 tmpvdisk->channel = scsidev->channel; 1070 tmpvdisk->id = scsidev->id; 1071 tmpvdisk->lun = scsidev->lun; 1072 tmpvdisk->valid = 1; 1073 vdisk->next = tmpvdisk; 1074 return 0; /* success */ 1075} 1076 1077static int 1078virthba_slave_configure(struct scsi_device *scsidev) 1079{ 1080 return 0; /* success */ 1081} 1082 1083static void 1084virthba_slave_destroy(struct scsi_device *scsidev) 1085{ 1086 /* midlevel calls this after device has been quiesced and 1087 * before it is to be deleted. 
1088 */ 1089 struct virtdisk_info *vdisk, *delvdisk; 1090 struct virthba_info *virthbainfo; 1091 struct Scsi_Host *scsihost = (struct Scsi_Host *) scsidev->host; 1092 1093 virthbainfo = (struct virthba_info *) scsihost->hostdata; 1094 if (!virthbainfo) 1095 LOGERR("Could not find virthba_info for scsihost\n"); 1096 for (vdisk = &virthbainfo->head; vdisk->next; vdisk = vdisk->next) { 1097 if (vdisk->next->valid && 1098 (vdisk->next->channel == scsidev->channel) && 1099 (vdisk->next->id == scsidev->id) && 1100 (vdisk->next->lun == scsidev->lun)) { 1101 delvdisk = vdisk->next; 1102 vdisk->next = vdisk->next->next; 1103 kfree(delvdisk); 1104 return; 1105 } 1106 } 1107 return; 1108} 1109 1110/*****************************************************/ 1111/* Scsi Cmnd support thread */ 1112/*****************************************************/ 1113 1114static void 1115do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) 1116{ 1117 struct virtdisk_info *vdisk; 1118 struct scsi_device *scsidev; 1119 struct sense_data *sd; 1120 1121 scsidev = scsicmd->device; 1122 memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE); 1123 sd = (struct sense_data *) scsicmd->sense_buffer; 1124 1125 /* Do not log errors for disk-not-present inquiries */ 1126 if ((cmdrsp->scsi.cmnd[0] == INQUIRY) && 1127 (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) && 1128 (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT)) 1129 return; 1130 1131 /* Okay see what our error_count is here.... 
*/ 1132 for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head; 1133 vdisk->next; vdisk = vdisk->next) { 1134 if ((scsidev->channel != vdisk->channel) 1135 || (scsidev->id != vdisk->id) 1136 || (scsidev->lun != vdisk->lun)) 1137 continue; 1138 1139 if (atomic_read(&vdisk->error_count) < VIRTHBA_ERROR_COUNT) { 1140 atomic_inc(&vdisk->error_count); 1141 LOGERR("SCSICMD ****FAILED scsicmd:0x%p op:0x%x <%d:%d:%d:%d> 0x%x-0x%x-0x%x-0x%x-0x%x.\n", 1142 scsicmd, cmdrsp->scsi.cmnd[0], 1143 scsidev->host->host_no, scsidev->id, 1144 scsidev->channel, scsidev->lun, 1145 cmdrsp->scsi.linuxstat, sd->Valid, sd->SenseKey, 1146 sd->AdditionalSenseCode, 1147 sd->AdditionalSenseCodeQualifier); 1148 if (atomic_read(&vdisk->error_count) == 1149 VIRTHBA_ERROR_COUNT) { 1150 LOGERR("Throtling SCSICMD errors disk <%d:%d:%d:%d>\n", 1151 scsidev->host->host_no, scsidev->id, 1152 scsidev->channel, scsidev->lun); 1153 } 1154 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD); 1155 } 1156 } 1157} 1158 1159static void 1160do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) 1161{ 1162 struct scsi_device *scsidev; 1163 unsigned char buf[36]; 1164 struct scatterlist *sg; 1165 unsigned int i; 1166 char *thispage; 1167 char *thispage_orig; 1168 int bufind = 0; 1169 struct virtdisk_info *vdisk; 1170 1171 scsidev = scsicmd->device; 1172 if ((cmdrsp->scsi.cmnd[0] == INQUIRY) 1173 && (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) { 1174 if (cmdrsp->scsi.no_disk_result == 0) 1175 return; 1176 1177 /* Linux scsi code is weird; it wants 1178 * a device at Lun 0 to issue report 1179 * luns, but we don't want a disk 1180 * there so we'll present a processor 1181 * there. 
*/ 1182 SET_NO_DISK_INQUIRY_RESULT(buf, cmdrsp->scsi.bufflen, 1183 scsidev->lun, 1184 DEV_DISK_CAPABLE_NOT_PRESENT, 1185 DEV_NOT_CAPABLE); 1186 1187 if (scsi_sg_count(scsicmd) == 0) { 1188 if (scsi_bufflen(scsicmd) > 0) { 1189 LOGERR("**** FAILED No scatter list for bufflen > 0\n"); 1190 BUG_ON(scsi_sg_count(scsicmd) == 1191 0); 1192 } 1193 memcpy(scsi_sglist(scsicmd), buf, 1194 cmdrsp->scsi.bufflen); 1195 return; 1196 } 1197 1198 sg = scsi_sglist(scsicmd); 1199 for (i = 0; i < scsi_sg_count(scsicmd); i++) { 1200 DBGVER("copying OUT OF buf into 0x%p %d\n", 1201 sg_page(sg + i), sg[i].length); 1202 thispage_orig = kmap_atomic(sg_page(sg + i)); 1203 thispage = (void *) ((unsigned long)thispage_orig | 1204 sg[i].offset); 1205 memcpy(thispage, buf + bufind, sg[i].length); 1206 kunmap_atomic(thispage_orig); 1207 bufind += sg[i].length; 1208 } 1209 } else { 1210 1211 vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head; 1212 for ( ; vdisk->next; vdisk = vdisk->next) { 1213 if ((scsidev->channel != vdisk->channel) 1214 || (scsidev->id != vdisk->id) 1215 || (scsidev->lun != vdisk->lun)) 1216 continue; 1217 1218 if (atomic_read(&vdisk->ios_threshold) > 0) { 1219 atomic_dec(&vdisk->ios_threshold); 1220 if (atomic_read(&vdisk->ios_threshold) == 0) { 1221 LOGERR("Resetting error count for disk\n"); 1222 atomic_set(&vdisk->error_count, 0); 1223 } 1224 } 1225 } 1226 } 1227} 1228 1229static void 1230complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) 1231{ 1232 DBGINF("cmdrsp: 0x%p, scsistat:0x%x.\n", cmdrsp, cmdrsp->scsi.scsistat); 1233 1234 /* take what we need out of cmdrsp and complete the scsicmd */ 1235 scsicmd->result = cmdrsp->scsi.linuxstat; 1236 if (cmdrsp->scsi.linuxstat) 1237 do_scsi_linuxstat(cmdrsp, scsicmd); 1238 else 1239 do_scsi_nolinuxstat(cmdrsp, scsicmd); 1240 1241 if (scsicmd->scsi_done) { 1242 DBGVER("Scsi_DONE\n"); 1243 scsicmd->scsi_done(scsicmd); 1244 } 1245} 1246 1247static inline void 
1248complete_vdiskmgmt_command(struct uiscmdrsp *cmdrsp) 1249{ 1250 /* copy the result of the taskmgmt and */ 1251 /* wake up the error handler that is waiting for this */ 1252 *(int *) cmdrsp->vdiskmgmt.notifyresult = cmdrsp->vdiskmgmt.result; 1253 wake_up_all((wait_queue_head_t *) cmdrsp->vdiskmgmt.notify); 1254 LOGINF("set notify result to %d\n", cmdrsp->vdiskmgmt.result); 1255} 1256 1257static inline void 1258complete_taskmgmt_command(struct uiscmdrsp *cmdrsp) 1259{ 1260 /* copy the result of the taskmgmt and */ 1261 /* wake up the error handler that is waiting for this */ 1262 *(int *) cmdrsp->scsitaskmgmt.notifyresult = 1263 cmdrsp->scsitaskmgmt.result; 1264 wake_up_all((wait_queue_head_t *) cmdrsp->scsitaskmgmt.notify); 1265 LOGINF("set notify result to %d\n", cmdrsp->scsitaskmgmt.result); 1266} 1267 1268static void 1269drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc, 1270 struct uiscmdrsp *cmdrsp) 1271{ 1272 unsigned long flags; 1273 int qrslt = 0; 1274 struct scsi_cmnd *scsicmd; 1275 struct Scsi_Host *shost = virthbainfo->scsihost; 1276 1277 while (1) { 1278 spin_lock_irqsave(&virthbainfo->chinfo.insertlock, flags); 1279 if (!ULTRA_CHANNEL_CLIENT_ACQUIRE_OS(dc->queueinfo->chan, 1280 "vhba", NULL)) { 1281 spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock, 1282 flags); 1283 virthbainfo->acquire_failed_cnt++; 1284 break; 1285 } 1286 qrslt = uisqueue_get_cmdrsp(dc->queueinfo, cmdrsp, 1287 IOCHAN_FROM_IOPART); 1288 ULTRA_CHANNEL_CLIENT_RELEASE_OS(dc->queueinfo->chan, 1289 "vhba", NULL); 1290 spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock, flags); 1291 if (qrslt == 0) 1292 break; 1293 if (cmdrsp->cmdtype == CMD_SCSI_TYPE) { 1294 /* scsicmd location is returned by the 1295 * deletion 1296 */ 1297 scsicmd = del_scsipending_entry(virthbainfo, 1298 (uintptr_t) cmdrsp->scsi.scsicmd); 1299 if (!scsicmd) 1300 break; 1301 /* complete the orig cmd */ 1302 complete_scsi_command(cmdrsp, scsicmd); 1303 } else if (cmdrsp->cmdtype == 
CMD_SCSITASKMGMT_TYPE) { 1304 if (!del_scsipending_entry(virthbainfo, 1305 (uintptr_t) cmdrsp->scsitaskmgmt.scsicmd)) 1306 break; 1307 complete_taskmgmt_command(cmdrsp); 1308 } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE) { 1309 /* The vHba pointer has no meaning in 1310 * a Client/Guest Partition. Let's be 1311 * safe and set it to NULL now. Do 1312 * not use it here! */ 1313 cmdrsp->disknotify.vHba = NULL; 1314 process_disk_notify(shost, cmdrsp); 1315 } else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) { 1316 if (!del_scsipending_entry(virthbainfo, 1317 (uintptr_t) cmdrsp->vdiskmgmt.scsicmd)) 1318 break; 1319 complete_vdiskmgmt_command(cmdrsp); 1320 } else 1321 LOGERR("Invalid cmdtype %d\n", cmdrsp->cmdtype); 1322 /* cmdrsp is now available for reuse */ 1323 } 1324} 1325 1326 1327/* main function for the thread that waits for scsi commands to arrive 1328 * in a specified queue 1329 */ 1330static int 1331process_incoming_rsps(void *v) 1332{ 1333 struct virthba_info *virthbainfo = v; 1334 struct chaninfo *dc = &virthbainfo->chinfo; 1335 struct uiscmdrsp *cmdrsp = NULL; 1336 const int SZ = sizeof(struct uiscmdrsp); 1337 U64 mask; 1338 unsigned long long rc1; 1339 1340 UIS_DAEMONIZE("vhba_incoming"); 1341 /* alloc once and reuse */ 1342 cmdrsp = kmalloc(SZ, GFP_ATOMIC); 1343 if (cmdrsp == NULL) { 1344 LOGERR("process_incoming_rsps ****FAILED to malloc - thread exiting\n"); 1345 complete_and_exit(&dc->threadinfo.has_stopped, 0); 1346 return 0; 1347 } 1348 mask = ULTRA_CHANNEL_ENABLE_INTS; 1349 while (1) { 1350 wait_event_interruptible_timeout(virthbainfo->rsp_queue, 1351 (atomic_read(&virthbainfo->interrupt_rcvd) == 1), 1352 usecs_to_jiffies(rsltq_wait_usecs)); 1353 atomic_set(&virthbainfo->interrupt_rcvd, 0); 1354 /* drain queue */ 1355 drain_queue(virthbainfo, dc, cmdrsp); 1356 rc1 = uisqueue_InterlockedOr(virthbainfo->flags_addr, mask); 1357 if (dc->threadinfo.should_stop) 1358 break; 1359 } 1360 1361 kfree(cmdrsp); 1362 1363 DBGINF("exiting processing incoming 
rsps.\n"); 1364 complete_and_exit(&dc->threadinfo.has_stopped, 0); 1365} 1366 1367/*****************************************************/ 1368/* proc filesystem functions */ 1369/*****************************************************/ 1370 1371static ssize_t 1372info_proc_read(struct file *file, char __user *buf, size_t len, loff_t *offset) 1373{ 1374 int length = 0; 1375 U64 phys_flags_addr; 1376 int i; 1377 struct virthba_info *virthbainfo; 1378 char *vbuf; 1379 loff_t pos = *offset; 1380 1381 if (pos < 0) 1382 return -EINVAL; 1383 1384 if (pos > 0 || !len) 1385 return 0; 1386 1387 vbuf = kzalloc(len, GFP_KERNEL); 1388 if (!vbuf) 1389 return -ENOMEM; 1390 1391 for (i = 0; i < VIRTHBASOPENMAX; i++) { 1392 if (VirtHbasOpen[i].virthbainfo == NULL) 1393 continue; 1394 1395 virthbainfo = VirtHbasOpen[i].virthbainfo; 1396 length += sprintf(vbuf + length, "CHANSOCK is not defined.\n"); 1397 1398 length += sprintf(vbuf + length, "MaxBuffLen:%d\n", MaxBuffLen); 1399 1400 length += sprintf(vbuf + length, "\nvirthba result queue poll wait:%d usecs.\n", 1401 rsltq_wait_usecs); 1402 1403 length += sprintf(vbuf + length, 1404 "\nModule build: Date:%s Time:%s\n", 1405 __DATE__, __TIME__); 1406 length += sprintf(vbuf + length, "\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n", 1407 virthbainfo->interrupts_rcvd, 1408 virthbainfo->interrupts_disabled); 1409 length += sprintf(vbuf + length, "\ninterrupts_notme = %llu,\n", 1410 virthbainfo->interrupts_notme); 1411 phys_flags_addr = virt_to_phys(virthbainfo->flags_addr); 1412 1413 length += sprintf(vbuf + length, "flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n", 1414 virthbainfo->flags_addr, phys_flags_addr, 1415 *virthbainfo->flags_addr); 1416 length += sprintf(vbuf + length, "acquire_failed_cnt:%llu\n", 1417 virthbainfo->acquire_failed_cnt); 1418 1419 length += sprintf(vbuf + length, "\n"); 1420 } 1421 if (copy_to_user(buf, vbuf, length)) { 1422 kfree(vbuf); 1423 return -EFAULT; 1424 } 1425 1426 kfree(vbuf); 
1427 *offset += length; 1428 return length; 1429} 1430 1431static ssize_t 1432enable_ints_read(struct file *file, char __user *buffer, 1433 size_t count, loff_t *ppos) 1434{ 1435 return 0; 1436} 1437 1438static ssize_t 1439enable_ints_write(struct file *file, const char __user *buffer, 1440 size_t count, loff_t *ppos) 1441{ 1442 char buf[count + 1]; 1443 int i, new_value; 1444 struct virthba_info *virthbainfo; 1445 U64 *Features_addr; 1446 U64 mask; 1447 1448 buf[count] = '\0'; 1449 if (copy_from_user(buf, buffer, count)) { 1450 LOGERR("copy_from_user failed. buf<<%.*s>> count<<%lu>>\n", 1451 (int) count, buf, count); 1452 return -EFAULT; 1453 } 1454 1455 i = sscanf(buf, "%d", &new_value); 1456 1457 if (i < 1) { 1458 LOGERR("Failed to scan value for enable_ints, buf<<%.*s>>", 1459 (int) count, buf); 1460 return -EFAULT; 1461 } 1462 1463 /* set all counts to new_value usually 0 */ 1464 for (i = 0; i < VIRTHBASOPENMAX; i++) { 1465 if (VirtHbasOpen[i].virthbainfo != NULL) { 1466 virthbainfo = VirtHbasOpen[i].virthbainfo; 1467 Features_addr = 1468 &virthbainfo->chinfo.queueinfo->chan->Features; 1469 if (new_value == 1) { 1470 mask = ~(ULTRA_IO_CHANNEL_IS_POLLING | 1471 ULTRA_IO_DRIVER_DISABLES_INTS); 1472 uisqueue_InterlockedAnd(Features_addr, mask); 1473 mask = ULTRA_IO_DRIVER_ENABLES_INTS; 1474 uisqueue_InterlockedOr(Features_addr, mask); 1475 rsltq_wait_usecs = 4000000; 1476 } else { 1477 mask = ~(ULTRA_IO_DRIVER_ENABLES_INTS | 1478 ULTRA_IO_DRIVER_DISABLES_INTS); 1479 uisqueue_InterlockedAnd(Features_addr, mask); 1480 mask = ULTRA_IO_CHANNEL_IS_POLLING; 1481 uisqueue_InterlockedOr(Features_addr, mask); 1482 rsltq_wait_usecs = 4000; 1483 } 1484 } 1485 } 1486 return count; 1487} 1488 1489static ssize_t 1490rqwu_proc_write(struct file *file, const char __user *buffer, 1491 size_t count, loff_t *ppos) 1492{ 1493 char buf[count]; 1494 int i, usecs; 1495 1496 if (copy_from_user(buf, buffer, count)) { 1497 LOGERR("copy_from_user failed. 
buf<<%.*s>> count<<%lu>>\n", 1498 (int) count, buf, count); 1499 return -EFAULT; 1500 } 1501 1502 i = sscanf(buf, "%d", &usecs); 1503 1504 if (i < 1) { 1505 LOGERR("Failed to scan value for rqwait_usecs buf<<%.*s>>", 1506 (int) count, buf); 1507 return -EFAULT; 1508 } 1509 1510 /* set global wait time */ 1511 rsltq_wait_usecs = usecs; 1512 return count; 1513} 1514 1515/* As per VirtpciFunc returns 1 for success and 0 for failure */ 1516static int 1517virthba_serverup(struct virtpci_dev *virtpcidev) 1518{ 1519 struct virthba_info *virthbainfo = 1520 (struct virthba_info *) ((struct Scsi_Host *) virtpcidev->scsi. 1521 scsihost)->hostdata; 1522 1523 DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo, 1524 virtpcidev->deviceNo); 1525 1526 if (!virthbainfo->serverdown) { 1527 DBGINF("Server up message recieved while server is already up.\n"); 1528 return 1; 1529 } 1530 if (virthbainfo->serverchangingstate) { 1531 LOGERR("Server already processing change state message\n"); 1532 return 0; 1533 } 1534 1535 virthbainfo->serverchangingstate = true; 1536 /* Must transition channel to ATTACHED state BEFORE we 1537 * can start using the device again 1538 */ 1539 ULTRA_CHANNEL_CLIENT_TRANSITION(virthbainfo->chinfo.queueinfo->chan, 1540 dev_name(&virtpcidev->generic_dev), 1541 CliStateOS, 1542 CHANNELCLI_ATTACHED, NULL); 1543 1544 /* Start Processing the IOVM Response Queue Again */ 1545 if (!uisthread_start(&virthbainfo->chinfo.threadinfo, 1546 process_incoming_rsps, 1547 virthbainfo, "vhba_incoming")) { 1548 LOGERR("uisthread_start rsp ****FAILED\n"); 1549 return 0; 1550 } 1551 virthbainfo->serverdown = false; 1552 virthbainfo->serverchangingstate = false; 1553 1554 return 1; 1555} 1556 1557static void 1558virthba_serverdown_complete(struct work_struct *work) 1559{ 1560 struct virthba_info *virthbainfo; 1561 struct virtpci_dev *virtpcidev; 1562 int i; 1563 struct scsipending *pendingdel = NULL; 1564 struct scsi_cmnd *scsicmd = NULL; 1565 struct uiscmdrsp *cmdrsp; 
1566 unsigned long flags; 1567 1568 virthbainfo = container_of(work, struct virthba_info, 1569 serverdown_completion); 1570 1571 /* Stop Using the IOVM Response Queue (queue should be drained 1572 * by the end) 1573 */ 1574 uisthread_stop(&virthbainfo->chinfo.threadinfo); 1575 1576 /* Fail Commands that weren't completed */ 1577 spin_lock_irqsave(&virthbainfo->privlock, flags); 1578 for (i = 0; i < MAX_PENDING_REQUESTS; i++) { 1579 pendingdel = &(virthbainfo->pending[i]); 1580 switch (pendingdel->cmdtype) { 1581 case CMD_SCSI_TYPE: 1582 scsicmd = (struct scsi_cmnd *) pendingdel->sent; 1583 scsicmd->result = (DID_RESET << 16); 1584 if (scsicmd->scsi_done) 1585 scsicmd->scsi_done(scsicmd); 1586 break; 1587 case CMD_SCSITASKMGMT_TYPE: 1588 cmdrsp = (struct uiscmdrsp *) pendingdel->sent; 1589 DBGINF("cmdrsp=0x%x, notify=0x%x\n", cmdrsp, 1590 cmdrsp->scsitaskmgmt.notify); 1591 *(int *) cmdrsp->scsitaskmgmt.notifyresult = 1592 TASK_MGMT_FAILED; 1593 wake_up_all((wait_queue_head_t *) 1594 cmdrsp->scsitaskmgmt.notify); 1595 break; 1596 case CMD_VDISKMGMT_TYPE: 1597 cmdrsp = (struct uiscmdrsp *) pendingdel->sent; 1598 *(int *) cmdrsp->vdiskmgmt.notifyresult = 1599 VDISK_MGMT_FAILED; 1600 wake_up_all((wait_queue_head_t *) 1601 cmdrsp->vdiskmgmt.notify); 1602 break; 1603 default: 1604 if (pendingdel->sent != NULL) 1605 LOGERR("Unknown command type: 0x%x. 
Only freeing list structure.\n", 1606 pendingdel->cmdtype); 1607 } 1608 pendingdel->cmdtype = 0; 1609 pendingdel->sent = NULL; 1610 } 1611 spin_unlock_irqrestore(&virthbainfo->privlock, flags); 1612 1613 virtpcidev = virthbainfo->virtpcidev; 1614 1615 DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo, 1616 virtpcidev->deviceNo); 1617 virthbainfo->serverdown = true; 1618 virthbainfo->serverchangingstate = false; 1619 /* Return the ServerDown response to Command */ 1620 device_pause_response(virtpcidev->busNo, virtpcidev->deviceNo, 0); 1621} 1622 1623/* As per VirtpciFunc returns 1 for success and 0 for failure */ 1624static int 1625virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state) 1626{ 1627 struct virthba_info *virthbainfo = 1628 (struct virthba_info *) ((struct Scsi_Host *) virtpcidev->scsi. 1629 scsihost)->hostdata; 1630 1631 DBGINF("virthba_serverdown"); 1632 DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo, 1633 virtpcidev->deviceNo); 1634 1635 if (!virthbainfo->serverdown && !virthbainfo->serverchangingstate) { 1636 virthbainfo->serverchangingstate = true; 1637 queue_work(virthba_serverdown_workqueue, 1638 &virthbainfo->serverdown_completion); 1639 } else if (virthbainfo->serverchangingstate) { 1640 LOGERR("Server already processing change state message\n"); 1641 return 0; 1642 } else 1643 LOGERR("Server already down, but another server down message received."); 1644 1645 return 1; 1646} 1647 1648/*****************************************************/ 1649/* Module Init & Exit functions */ 1650/*****************************************************/ 1651 1652static int __init 1653virthba_parse_line(char *str) 1654{ 1655 DBGINF("In virthba_parse_line %s\n", str); 1656 return 1; 1657} 1658 1659static void __init 1660virthba_parse_options(char *line) 1661{ 1662 char *next = line; 1663 1664 POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO); 1665 if (line == NULL || !*line) 1666 return; 1667 while ((line = next) != 
NULL) { 1668 next = strchr(line, ' '); 1669 if (next != NULL) 1670 *next++ = 0; 1671 if (!virthba_parse_line(line)) 1672 DBGINF("Unknown option '%s'\n", line); 1673 } 1674 1675 POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO); 1676} 1677 1678static int __init 1679virthba_mod_init(void) 1680{ 1681 int error; 1682 int i; 1683 1684 LOGINF("Entering virthba_mod_init...\n"); 1685 1686 POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO); 1687 virthba_parse_options(virthba_options); 1688 1689 error = virtpci_register_driver(&virthba_driver); 1690 if (error < 0) { 1691 LOGERR("register ****FAILED 0x%x\n", error); 1692 POSTCODE_LINUX_3(VHBA_CREATE_FAILURE_PC, error, 1693 POSTCODE_SEVERITY_ERR); 1694 } else { 1695 /* create the proc directories */ 1696 virthba_proc_dir = proc_mkdir(DIR_PROC_ENTRY, NULL); 1697 info_proc_entry = proc_create(INFO_PROC_ENTRY_FN, 0, 1698 virthba_proc_dir, 1699 &proc_info_fops); 1700 rqwaitus_proc_entry = proc_create(RQWU_PROC_ENTRY_FN, 0, 1701 virthba_proc_dir, 1702 &proc_rqwu_fops); 1703 enable_ints_proc_entry = proc_create(ENABLE_INTS_ENTRY_FN, 0, 1704 virthba_proc_dir, 1705 &proc_enable_ints_fops); 1706 1707 /* Initialize DARWorkQ */ 1708 INIT_WORK(&DARWorkQ, doDiskAddRemove); 1709 spin_lock_init(&DARWorkQLock); 1710 1711 /* clear out array */ 1712 for (i = 0; i < VIRTHBASOPENMAX; i++) 1713 VirtHbasOpen[i].virthbainfo = NULL; 1714 /* Initialize the serverdown workqueue */ 1715 virthba_serverdown_workqueue = 1716 create_singlethread_workqueue("virthba_serverdown"); 1717 if (virthba_serverdown_workqueue == NULL) { 1718 LOGERR("**** FAILED virthba_serverdown_workqueue creation\n"); 1719 POSTCODE_LINUX_2(VHBA_CREATE_FAILURE_PC, 1720 POSTCODE_SEVERITY_ERR); 1721 error = -1; 1722 } 1723 } 1724 1725 POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO); 1726 LOGINF("Leaving virthba_mod_init\n"); 1727 return error; 1728} 1729 1730static ssize_t 1731virthba_acquire_lun(struct device *cdev, struct device_attribute *attr, 
1732 const char *buf, size_t count) 1733{ 1734 struct uisscsi_dest vdest; 1735 struct Scsi_Host *shost = class_to_shost(cdev); 1736 int i; 1737 1738 i = sscanf(buf, "%d-%d-%d", &vdest.channel, &vdest.id, &vdest.lun); 1739 if (i != 3) 1740 return i; 1741 1742 return forward_vdiskmgmt_command(VDISK_MGMT_ACQUIRE, shost, &vdest); 1743} 1744 1745static ssize_t 1746virthba_release_lun(struct device *cdev, struct device_attribute *attr, 1747 const char *buf, size_t count) 1748{ 1749 struct uisscsi_dest vdest; 1750 struct Scsi_Host *shost = class_to_shost(cdev); 1751 int i; 1752 1753 i = sscanf(buf, "%d-%d-%d", &vdest.channel, &vdest.id, &vdest.lun); 1754 if (i != 3) 1755 return i; 1756 1757 return forward_vdiskmgmt_command(VDISK_MGMT_RELEASE, shost, &vdest); 1758} 1759 1760#define CLASS_DEVICE_ATTR(_name, _mode, _show, _store) \ 1761 struct device_attribute class_device_attr_##_name = \ 1762 __ATTR(_name, _mode, _show, _store) 1763 1764static CLASS_DEVICE_ATTR(acquire_lun, S_IWUSR, NULL, virthba_acquire_lun); 1765static CLASS_DEVICE_ATTR(release_lun, S_IWUSR, NULL, virthba_release_lun); 1766 1767static DEVICE_ATTRIBUTE *virthba_shost_attrs[] = { 1768 &class_device_attr_acquire_lun, 1769 &class_device_attr_release_lun, 1770 NULL 1771}; 1772 1773static void __exit 1774virthba_mod_exit(void) 1775{ 1776 LOGINF("entering virthba_mod_exit...\n"); 1777 1778 virtpci_unregister_driver(&virthba_driver); 1779 /* unregister is going to call virthba_remove */ 1780 /* destroy serverdown completion workqueue */ 1781 if (virthba_serverdown_workqueue) { 1782 destroy_workqueue(virthba_serverdown_workqueue); 1783 virthba_serverdown_workqueue = NULL; 1784 } 1785 1786 if (info_proc_entry) 1787 remove_proc_entry(INFO_PROC_ENTRY_FN, virthba_proc_dir); 1788 1789 if (rqwaitus_proc_entry) 1790 remove_proc_entry(RQWU_PROC_ENTRY_FN, NULL); 1791 1792 if (enable_ints_proc_entry) 1793 remove_proc_entry(ENABLE_INTS_ENTRY_FN, NULL); 1794 1795 if (virthba_proc_dir) 1796 remove_proc_entry(DIR_PROC_ENTRY, 
NULL); 1797 1798 LOGINF("Leaving virthba_mod_exit\n"); 1799 1800} 1801 1802/* specify function to be run at module insertion time */ 1803module_init(virthba_mod_init); 1804 1805/* specify function to be run when module is removed */ 1806module_exit(virthba_mod_exit); 1807 1808MODULE_LICENSE("GPL"); 1809MODULE_AUTHOR("Usha Srinivasan"); 1810MODULE_ALIAS("uisvirthba"); 1811 /* this is extracted during depmod and kept in modules.dep */ 1812/* module parameter */ 1813module_param(virthba_options, charp, S_IRUGO); 1814