commsup.c revision c8f7b073e0e81499474a84ee2a90f77f7805c7f8
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contain all routines that are required for FSA host/adapter
 *    communication.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <asm/semaphore.h>

#include "aacraid.h"

/**
 *	fib_map_alloc - allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
	dprintk((KERN_INFO
	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	if ((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  &dev->hw_fib_pa)) == NULL)
		return -ENOMEM;
	return 0;
}

/**
 *	aac_fib_map_free - free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
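 *
 *	The size freed here must mirror the allocation made in fib_map_alloc(),
 *	i.e. max_fib_size * (can_queue + AAC_NUM_MGT_FIB) bytes.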
 */

void aac_fib_map_free(struct aac_dev *dev)
{
	pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa);
}

/**
 *	aac_fib_setup - setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */

int aac_fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib_va;
	dma_addr_t hw_fib_pa;
	int i;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
	}
	if (i < 0)
		return -ENOMEM;

	hw_fib_va = dev->hw_fib_va;
	hw_fib_pa = dev->hw_fib_pa;
	memset(hw_fib_va, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
	/*
	 *	Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
	{
		fibptr->dev = dev;
		fibptr->hw_fib = hw_fib_va;
		fibptr->data = (void *) fibptr->hw_fib->data;
		fibptr->next = fibptr+1;	/* Forward chain the fibs */
		init_MUTEX_LOCKED(&fibptr->event_wait);
		spin_lock_init(&fibptr->event_lock);
		hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size);
		fibptr->hw_fib_pa = hw_fib_pa;
		hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size);
		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
	}
	/*
	 *	Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	/*
	 *	Enable this to debug out of queue space
	 */
	dev->free_fib = &dev->fibs[0];
	return 0;
}

/**
 *	aac_fib_alloc - allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
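 *
 *	The free list is manipulated under dev->fib_lock with interrupts
 *	disabled, so this may be called from contexts that cannot sleep.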
 */

struct fib *aac_fib_alloc(struct aac_dev *dev)
{
	struct fib * fibptr;
	unsigned long flags;
	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if (!fibptr) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 *	Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib->header.XferState = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}

/**
 *	aac_fib_free - free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 *	(either free or timed out)
 */

void aac_fib_free(struct fib *fibptr)
{
	unsigned long flags;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
		aac_config.fib_timeouts++;
		fibptr->next = fibptr->dev->timeout_fib;
		fibptr->dev->timeout_fib = fibptr;
	} else {
		if (fibptr->hw_fib->header.XferState != 0) {
			printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
				 (void*)fibptr,
				 le32_to_cpu(fibptr->hw_fib->header.XferState));
		}
		fibptr->next = fibptr->dev->free_fib;
		fibptr->dev->free_fib = fibptr;
	}
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}

/**
 *	aac_fib_init - initialise a fib
 *	@fibptr: The fib to initialize
 *
 *	Set up the generic fib fields ready for use
 */

void aac_fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;

	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}

/**
 *	fib_dealloc - deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */

static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;
	BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
	hw_fib->header.XferState = 0;
}

/*
 *	Communication primitives define and support the queuing method we use
 *	to support host to adapter communication. All queue accesses happen
 *	through these routines and they are the only routines which have
 *	knowledge of how these queues are implemented.
 */

/**
 *	aac_get_entry - get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	With a priority the routine returns a queue entry if the queue has free
 *	entries. If the queue is full (no free entries) then no entry is
 *	returned and the function returns 0, otherwise 1 is returned.
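 *
 *	For interrupt moderation, *nonotify is set to 1 once the queue already
 *	holds entries beyond the first couple outstanding; callers use it to
 *	skip the adapter notification for that entry.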
 */

static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could or off
	 *	the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, q->numpending);
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}

/**
 *	aac_queue_get - get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@qid: Queue number
 *	@hw_fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */

static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore adapters pointer to the FIB */
		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true then we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}

/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This
 *	level sends and receives FIBs.
 *	This level has no knowledge of how these FIBs get passed back and
 *	forth.
 */

/**
 *	aac_fib_send - send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response then
 *	an event to wait on must be supplied. This event will be set when a
 *	response FIB is received from the adapter.
 */

int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_queue * q;
	unsigned long flags = 0;
	unsigned long qflags;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	fibptr->hw_fib->header.Flags = 0;	/* 0 the flags field - internal only*/
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry, connect the FIB to it and notify
	 *	the adapter that a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
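	 *
	 *	For an asynchronous send the completion path (outside this
	 *	routine) is expected to invoke the callback with callback_data
	 *	once the adapter returns the response FIB; nothing below
	 *	calls it directly.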
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	}

	fibptr->done = 0;
	fibptr->flags = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG "  XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));

	if (!dev->queues)
		return -ENODEV;
	q = &dev->queues->queue[AdapNormCmdQueue];

	if (wait)
		spin_lock_irqsave(&fibptr->event_lock, flags);
	spin_lock_irqsave(q->lock, qflags);
	if (dev->new_comm_interface) {
		unsigned long count = 10000000L; /* 50 seconds */
		q->numpending++;
		spin_unlock_irqrestore(q->lock, qflags);
		while (aac_adapter_send(fibptr) != 0) {
			if (--count == 0) {
				if (wait)
					spin_unlock_irqrestore(&fibptr->event_lock, flags);
				spin_lock_irqsave(q->lock, qflags);
				q->numpending--;
				spin_unlock_irqrestore(q->lock, qflags);
				return -ETIMEDOUT;
			}
			udelay(5);
		}
	} else {
		u32 index;
		unsigned long nointr = 0;
		aac_queue_get(dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);

		q->numpending++;
		*(q->headers.producer) = cpu_to_le32(index + 1);
		spin_unlock_irqrestore(q->lock, qflags);
		dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n", index));
		if (!(nointr & aac_config.irq_mod))
			aac_adapter_notify(dev, AdapNormCmdQueue);
	}

	/*
	 *	If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
		if (wait < 0) {
			/*
			 *	*VERY* Dangerous to time out a command, the
			 *	assumption is made that we have no hope of
			 *	functioning because an interrupt routing or other
			 *	hardware failure has occurred.
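			 *
			 *	The loop below busy-polls event_wait with
			 *	udelay(5) for 36000000 iterations, i.e. roughly
			 *	three minutes, rather than sleeping on it.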
			 */
			unsigned long count = 36000000L; /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				if (--count == 0) {
					spin_lock_irqsave(q->lock, qflags);
					q->numpending--;
					spin_unlock_irqrestore(q->lock, qflags);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update mother board BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}
				udelay(5);
			}
		} else if (down_interruptible(&fibptr->event_wait)) {
			spin_lock_irqsave(&fibptr->event_lock, flags);
			if (fibptr->done == 0) {
				fibptr->done = 2; /* Tell interrupt we aborted */
				spin_unlock_irqrestore(&fibptr->event_lock, flags);
				return -EINTR;
			}
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
		}
		BUG_ON(fibptr->done == 0);

		if ((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			return -ETIMEDOUT;
		} else {
			return 0;
		}
	}
	/*
	 *	If the user does not want a response then return success,
	 *	otherwise return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}

/**
 *	aac_consumer_get - get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Will return a pointer to the entry on the top of the queue requested that
 *	we are a consumer of, and return the address of the queue entry. It does
 *	not change the state of the queue.
 */

int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;
	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 *	The consumer index must be wrapped if we have reached
		 *	the end of the queue, else we just use the entry
		 *	pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return(status);
}

/**
 *	aac_consumer_free - free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}

/**
 *	aac_fib_adapter_complete - complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
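 *
 *	With the new comm interface the hw_fib is simply kfree()d; otherwise
 *	the FIB is marked HostProcessed and queued back on AdapNormRespQueue,
 *	notifying the adapter unless interrupt moderation suppresses it.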
 */

int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_dev * dev = fibptr->dev;
	struct aac_queue * q;
	unsigned long nointr = 0;
	unsigned long qflags;

	if (hw_fib->header.XferState == 0) {
		if (dev->new_comm_interface)
			kfree(hw_fib);
		return 0;
	}
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC) {
		if (dev->new_comm_interface)
			kfree(hw_fib);
		return -EINVAL;
	}
	/*
	 *	This block handles the case where the adapter had sent us a
	 *	command and we have finished processing the command. We
	 *	call completeFib when we are done processing the command
	 *	and want to send a response back to the adapter. This will
	 *	send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->new_comm_interface) {
			kfree(hw_fib);
		} else {
			u32 index;
			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	} else {
		printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}

/**
 *	aac_fib_complete - fib completion handler
 *	@fibptr: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */

int aac_fib_complete(struct fib *fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;

	/*
	 *	Check for a fib which has already been completed
	 */

	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */

	if (hw_fib->header.StructType != FIB_MAGIC)
		return -EINVAL;
	/*
	 *	This block completes a cdb which originated on the host and we
	 *	just need to deallocate the cdb or reinit it. At this point the
	 *	command is complete that we had sent to the adapter and this
	 *	cdb could be reused.
	 */
	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
	{
		fib_dealloc(fibptr);
	}
	else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost))
	{
		/*
		 *	This handles the case when the host has aborted the I/O
		 *	to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}

/**
 *	aac_printf - handle printf from firmware
 *	@dev: Adapter
 *	@val: Message info
 *
 *	Print a message passed to us by the controller firmware on the
 *	Adaptec board
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled)
	{
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 *	The size of the printfbuf is set in port.c
		 *	There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "%s:%s", dev->name, cp);
		else
			printk(KERN_INFO "%s:%s", dev->name, cp);
	}
	memset(cp, 0, 256);
}


/**
 *	aac_handle_aif - Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to fibptr from adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */

#define AIF_SNIFF_TIMEOUT	(30*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
	int busy;
	u32 container;
	struct scsi_device *device;
	enum {
		NOTHING,
		DELETE,
		ADD,
		CHANGE
	} device_config_needed;

	/* Sniff for container changes */

	if (!dev || !dev->fsa_dev)
		return;
	container = (u32)-1;

	/*
	 *	We have set this up to try and minimize the number of
	 *	re-configures that take place. As a result of this when
	 *	certain AIF's come in we will set a flag waiting for another
	 *	type of AIF before setting the re-config flag.
	 */
	switch (le32_to_cpu(aifcmd->command)) {
	case AifCmdDriverNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 *	Morph or Expand complete
		 */
		case AifDenMorphComplete:
		case AifDenVolumeExtendComplete:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;

			/*
			 *	Find the scsi_device associated with the SCSI
			 *	address. Make sure we have the right array, and if
			 *	so set the flag to initiate a new re-config once we
			 *	see an AifEnConfigChange AIF come through.
			 */

			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
				if (device) {
					dev->fsa_dev[container].config_needed = CHANGE;
					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
					dev->fsa_dev[container].config_waiting_stamp = jiffies;
					scsi_device_put(device);
				}
			}
		}

		/*
		 *	If we are waiting on something and this happens to be
		 *	that thing then set the re-configure flag.
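		 *	(i.e. clear config_waiting_on when the first word of the
		 *	AIF data matches the event this container was waiting on
		 *	and the wait has not yet timed out.)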
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdEventNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 *	Add an Array.
		 */
		case AifEnAddContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = ADD;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Delete an Array.
		 */
		case AifEnDeleteContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = DELETE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Container change detected. If we currently are not
		 *	waiting on something else, setup to wait on a Config Change.
		 */
		case AifEnContainerChange:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				break;
			dev->fsa_dev[container].config_needed = CHANGE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		case AifEnConfigChange:
			break;

		}

		/*
		 *	If we are waiting on something and this happens to be
		 *	that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdJobProgress:
		/*
		 *	These are job progress AIF's. When a Clear is being
		 *	done on a container it is initially created then hidden from
		 *	the OS. When the clear completes we don't get a config
		 *	change so we monitor the job status complete on a clear then
		 *	wait for a container change.
		 */

		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
		  || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = ADD;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && (((u32 *)aifcmd->data)[6] == 0)
		 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = DELETE;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		break;
	}

	device_config_needed = NOTHING;
	for (container = 0; container < dev->maximum_num_containers;
	    ++container) {
		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
			(dev->fsa_dev[container].config_needed != NOTHING) &&
			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
			device_config_needed =
				dev->fsa_dev[container].config_needed;
			dev->fsa_dev[container].config_needed = NOTHING;
			break;
		}
	}
	if (device_config_needed == NOTHING)
		return;

	/*
	 *	If we decided that a re-configuration needs to be done,
	 *	schedule it here on the way out the door, please close the door
	 *	behind you.
	 */

	busy = 0;


	/*
	 *	Find the scsi_device associated with the SCSI address,
	 *	and mark it as changed, invalidating the cache. This deals
	 *	with changes to existing device IDs.
	 */

	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 *	force reload of disk info via aac_probe_container
	 */
	if ((device_config_needed == CHANGE)
	 && (dev->fsa_dev[container].valid == 1))
		dev->fsa_dev[container].valid = 2;
	if ((device_config_needed == CHANGE) ||
			(device_config_needed == ADD))
		aac_probe_container(dev, container);
	device = scsi_device_lookup(dev->scsi_host_ptr,
		CONTAINER_TO_CHANNEL(container),
		CONTAINER_TO_ID(container),
		CONTAINER_TO_LUN(container));
	if (device) {
		switch (device_config_needed) {
		case DELETE:
			scsi_remove_device(device);
			break;
		case CHANGE:
			if (!dev->fsa_dev[container].valid) {
				scsi_remove_device(device);
				break;
			}
			scsi_rescan_device(&device->sdev_gendev);

		default:
			break;
		}
		scsi_device_put(device);
	}
	if (device_config_needed == ADD) {
		scsi_add_device(dev->scsi_host_ptr,
			CONTAINER_TO_CHANNEL(container),
			CONTAINER_TO_ID(container),
			CONTAINER_TO_LUN(container));
	}

}

/**
 *	aac_command_thread - command processing thread
 *	@data: Adapter to monitor
 *
 *	Waits on the commandready event in its queue. When the event gets set
 *	it will pull FIBs off its queue. It will continue to pull FIBs off
 *	until the queue is empty. When the queue is empty it will wait for
 *	more FIBs.
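 *
 *	Runs as a kernel thread (note the kthread_should_stop() check in the
 *	main loop) and exits when the driver asks it to stop.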
 */

int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 *	We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 *	Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk ((KERN_INFO "aac_command_thread start\n"));
	while (1)
	{
		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd * aifcmd;

			set_current_state(TASK_RUNNING);

			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);

			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 *	We will process the FIB here or pass it to a
			 *	worker thread that is TBD. We really can't
			 *	do anything at this point since we don't have
			 *	anything defined for this thread to do.
			 */
			hw_fib = fib->hw_fib;
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->hw_fib = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 *	We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				aac_handle_aif(dev, fib);
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			} else {
				struct list_head *entry;
				/* The u32 here is important and intended. We are using
				   32bit wrapping time to fit the adapter field */

				u32 time_now, time_last;
				unsigned long flagv;
				unsigned num;
				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
				struct fib ** fib_pool, ** fib_p;

				/* Sniff events */
				if ((aifcmd->command ==
				     cpu_to_le32(AifCmdEventNotify)) ||
				    (aifcmd->command ==
				     cpu_to_le32(AifCmdJobProgress))) {
					aac_handle_aif(dev, fib);
				}

				time_now = jiffies/HZ;

				/*
				 * Warning: no sleep allowed while
				 * holding spinlock. We take the estimate
				 * and pre-allocate a set of fibs outside the
				 * lock.
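				 * The estimate starts from the adapter fib
				 * area size and is bumped by one for every
				 * registered fib context, so each waiting
				 * context can receive its own copy.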
				 */
				num = le32_to_cpu(dev->init->AdapterFibsSize)
				    / sizeof(struct hw_fib); /* some extra */
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				while (entry != &dev->fib_list) {
					entry = entry->next;
					++num;
				}
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				hw_fib_pool = NULL;
				fib_pool = NULL;
				if (num
				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
					hw_fib_p = hw_fib_pool;
					fib_p = fib_pool;
					while (hw_fib_p < &hw_fib_pool[num]) {
						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
							--hw_fib_p;
							break;
						}
						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
							kfree(*(--hw_fib_p));
							break;
						}
					}
					if ((num = hw_fib_p - hw_fib_pool) == 0) {
						kfree(fib_pool);
						fib_pool = NULL;
						kfree(hw_fib_pool);
						hw_fib_pool = NULL;
					}
				} else {
					kfree(hw_fib_pool);
					hw_fib_pool = NULL;
				}
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 * For each Context that is on the
				 * fibctxList, make a copy of the
				 * fib, and then set the event to wake up the
				 * thread that is waiting for it.
				 */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (entry != &dev->fib_list) {
					/*
					 * Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 * Check if the queue is getting
					 * backlogged
					 */
					if (fibctx->count > 20)
					{
						/*
						 * It's *not* jiffies folks,
						 * but jiffies / HZ so do not
						 * panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 * Has it been > 2 minutes
						 * since the last read off
						 * the queue?
						 */
						if ((time_now - time_last) > aif_timeout) {
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 * Warning: no sleep allowed while
					 * holding spinlock
					 */
					if (hw_fib_p < &hw_fib_pool[num]) {
						hw_newfib = *hw_fib_p;
						*(hw_fib_p++) = NULL;
						newfib = *fib_p;
						*(fib_p++) = NULL;
						/*
						 * Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib = hw_newfib;
						/*
						 * Put the FIB onto the
						 * fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 * Set the event to wake up the
						 * thread that is waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
					}
					entry = entry->next;
				}
				/*
				 *	Set the status of this FIB
				 */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				/* Free up the remaining resources */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (hw_fib_p < &hw_fib_pool[num]) {
					kfree(*hw_fib_p);
					kfree(*fib_p);
					++fib_p;
					++hw_fib_p;
				}
				kfree(hw_fib_pool);
				kfree(fib_pool);
			}
			kfree(fib);
			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		}
		/*
		 *	There are no more AIF's
		 */
		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
		schedule();

		if (kthread_should_stop())
			break;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}