commsup.c revision 8e0c5ebde82b08f6d996e11983890fc4cc085fab
/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *   commsup.c
 *
 * Abstract: Contains all routines required for FSA host/adapter
 *   communication.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <asm/semaphore.h>
#include <asm/delay.h>

#include "aacraid.h"

/**
 * fib_map_alloc - allocate the fib objects
 * @dev: Adapter to allocate for
 *
 * Allocate and map the shared PCI space for the FIB blocks used to
 * talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
	dprintk((KERN_INFO
	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	if ((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  &dev->hw_fib_pa)) == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * fib_map_free - free the fib objects
 * @dev: Adapter to free
 *
 * Free the PCI mappings and the memory allocated for FIB blocks
 * on this adapter.
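 *
 * Note: the size handed to pci_free_consistent() below must mirror the
 * fib_map_alloc() allocation, i.e. max_fib_size * (can_queue +
 * AAC_NUM_MGT_FIB) bytes.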
 */

void fib_map_free(struct aac_dev *dev)
{
	pci_free_consistent(dev->pdev,
	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  dev->hw_fib_va, dev->hw_fib_pa);
}

/**
 * fib_setup - setup the fibs
 * @dev: Adapter to set up
 *
 * Allocate the PCI space for the fibs, map it and then initialise the
 * fib area, the unmapped fib data and also the free list
 */

int fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib_va;
	dma_addr_t hw_fib_pa;
	int i;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
	}
	if (i < 0)
		return -ENOMEM;

	hw_fib_va = dev->hw_fib_va;
	hw_fib_pa = dev->hw_fib_pa;
	memset(hw_fib_va, 0,
	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
	/*
	 * Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i];
	  i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
	  i++, fibptr++) {
		fibptr->dev = dev;
		fibptr->hw_fib = hw_fib_va;
		fibptr->data = (void *) fibptr->hw_fib->data;
		fibptr->next = fibptr + 1;	/* Forward chain the fibs */
		init_MUTEX_LOCKED(&fibptr->event_wait);
		spin_lock_init(&fibptr->event_lock);
		hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size);
		fibptr->hw_fib_pa = hw_fib_pa;
		hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size);
		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
	}
	/*
	 * Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	/*
	 * Enable this to debug out of queue space
	 */
	dev->free_fib = &dev->fibs[0];
	return 0;
}

/**
 * fib_alloc - allocate a fib
 * @dev: Adapter to allocate the fib for
 *
 * Allocate a fib from the adapter fib pool. If the pool is empty we
 * return NULL.
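 *
 * The free list is a singly linked chain through fib->next, protected by
 * dev->fib_lock. Callers typically pair this with fib_init() before
 * sending, and fib_complete()/fib_free() once the command is finished.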
 */

struct fib * fib_alloc(struct aac_dev *dev)
{
	struct fib * fibptr;
	unsigned long flags;

	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if (!fibptr) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 * Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 * Null out fields that depend on being zero at the start of
	 * each I/O
	 */
	fibptr->hw_fib->header.XferState = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}

/**
 * fib_free - free a fib
 * @fibptr: fib to free up
 *
 * Frees up a fib and places it on the appropriate queue
 * (either free or timed out)
 */

void fib_free(struct fib * fibptr)
{
	unsigned long flags;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
		aac_config.fib_timeouts++;
		fibptr->next = fibptr->dev->timeout_fib;
		fibptr->dev->timeout_fib = fibptr;
	} else {
		if (fibptr->hw_fib->header.XferState != 0) {
			printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
				(void *)fibptr,
				le32_to_cpu(fibptr->hw_fib->header.XferState));
		}
		fibptr->next = fibptr->dev->free_fib;
		fibptr->dev->free_fib = fibptr;
	}
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}

/**
 * fib_init - initialise a fib
 * @fibptr: The fib to initialize
 *
 * Set up the generic fib fields ready for use
 */

void fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;

	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}

/**
 * fib_dealloc - deallocate a fib
 * @fibptr: fib to deallocate
 *
 * Will deallocate and return to the free pool the FIB pointed to by the
 * caller.
 */

static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;

	if (hw_fib->header.StructType != FIB_MAGIC)
		BUG();
	hw_fib->header.XferState = 0;
}

/*
 * Communication primitives define and support the queuing method we use
 * to support host to adapter communication. All queue accesses happen
 * through these routines; they are the only routines which have knowledge
 * of how these queues are implemented.
 */

/**
 * aac_get_entry - get a queue entry
 * @dev: Adapter
 * @qid: Queue Number
 * @entry: Entry return
 * @index: Index return
 * @nonotify: notification control
 *
 * With a priority the routine returns a queue entry if the queue has free
 * entries. If the queue is full (no free entries) then no entry is
 * returned and the function returns 0, otherwise 1 is returned.
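 *
 * The returned index wraps back to zero at ADAP_NORM_CMD_ENTRIES (or
 * ADAP_NORM_RESP_ENTRIES for the response queue). *nonotify is set when
 * the queue already holds at least two outstanding entries, letting the
 * caller skip aac_adapter_notify() as a form of interrupt moderation.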
 */

static int aac_get_entry(struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 * All of the queues wrap when they reach the end, so we check
	 * to see if they have reached the end and if they have we just
	 * set the index back to zero. This is a wrap. You could or off
	 * the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, q->numpending);
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}

/**
 * aac_queue_get - get the next free QE
 * @dev: Adapter
 * @index: Returned index
 * @qid: Queue number
 * @hw_fib: Fib to associate with the queue entry
 * @wait: Wait if queue full
 * @fibptr: Driver fib object to go with fib
 * @nonotify: Don't notify the adapter
 *
 * Gets the next free QE off the requested priority adapter command
 * queue and associates the Fib with the QE. The QE represented by
 * index is ready to insert on the queue when this routine returns
 * success.
 */

static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 * Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			/* if no entries wait for some if caller wants to */
		}
		/*
		 * Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore adapters pointer to the FIB */
		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 * If MapFib is true then we need to map the Fib and put pointers
	 * in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}

/*
 * Define the highest level of host to adapter communication routines.
 * These routines will support host to adapter FS communication. These
 * routines have no knowledge of the communication method used. This level
 * sends and receives FIBs.
 * This level has no knowledge of how these FIBs get passed back and
 * forth.
 */

/**
 * fib_send - send a fib to the adapter
 * @command: Command to send
 * @fibptr: The fib
 * @size: Size of fib data area
 * @priority: Priority of Fib
 * @wait: Async/sync select
 * @reply: True if a reply is wanted
 * @callback: Called with reply
 * @callback_data: Passed to callback
 *
 * Sends the requested FIB to the adapter and optionally will wait for a
 * response FIB. If the caller does not wish to wait for a response then
 * an event to wait on must be supplied. This event will be set when a
 * response FIB is received from the adapter.
 */

int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_queue * q;
	unsigned long flags = 0;
	unsigned long qflags;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 * There are 5 cases with the wait and response requested flags.
	 * The only invalid cases are if the caller requests to wait and
	 * does not request a response and if the caller does not want a
	 * response and the Fib is not allocated from pool. If a response
	 * is not requested the Fib will just be deallocated by the DPC
	 * routine when the response comes back from the adapter. No
	 * further processing will be done besides deleting the Fib. We
	 * will have a debug mode where the adapter can notify the host
	 * it had a problem and the host can log that fact.
	 */
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 * Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
	/*
	 * Set FIB state to indicate where it came from and if we want a
	 * response from the adapter. Also load the command from the
	 * caller.
	 *
	 * Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	fibptr->hw_fib->header.Flags = 0;	/* 0 the flags field - internal only*/
	/*
	 * Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 * Get a queue entry, connect the FIB to it and notify
	 * the adapter a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

	/*
	 * Fill in the Callback and CallbackContext if we are not
	 * going to wait.
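	 * For synchronous (waited) sends the caller instead blocks on
	 * fibptr->event_wait further down, which the response handling
	 * path releases when the reply arrives.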
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	}

	fibptr->done = 0;
	fibptr->flags = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG "  XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n", fibptr->hw_fib));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n", (ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n", fibptr));

	q = &dev->queues->queue[AdapNormCmdQueue];

	if (wait)
		spin_lock_irqsave(&fibptr->event_lock, flags);
	spin_lock_irqsave(q->lock, qflags);
	if (dev->new_comm_interface) {
		unsigned long count = 10000000L; /* 50 seconds */
		list_add_tail(&fibptr->queue, &q->pendingq);
		q->numpending++;
		spin_unlock_irqrestore(q->lock, qflags);
		while (aac_adapter_send(fibptr) != 0) {
			if (--count == 0) {
				if (wait)
					spin_unlock_irqrestore(&fibptr->event_lock, flags);
				spin_lock_irqsave(q->lock, qflags);
				q->numpending--;
				list_del(&fibptr->queue);
				spin_unlock_irqrestore(q->lock, qflags);
				return -ETIMEDOUT;
			}
			udelay(5);
		}
	} else {
		u32 index;
		unsigned long nointr = 0;

		aac_queue_get(dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);

		list_add_tail(&fibptr->queue, &q->pendingq);
		q->numpending++;
		*(q->headers.producer) = cpu_to_le32(index + 1);
		spin_unlock_irqrestore(q->lock, qflags);
		dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n", index));
		if (!(nointr & aac_config.irq_mod))
			aac_adapter_notify(dev, AdapNormCmdQueue);
	}

	/*
	 * If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
		if (wait < 0) {
			/*
			 * *VERY* Dangerous to time out a command, the
			 * assumption is made that we have no hope of
			 * functioning because an interrupt routing or other
			 * hardware failure has occurred.
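			 * The loop below polls down_trylock() with a 5us
			 * delay, i.e. roughly 36000000 * 5us = 3 minutes,
			 * before giving up.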
			 */
			unsigned long count = 36000000L; /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				if (--count == 0) {
					spin_lock_irqsave(q->lock, qflags);
					q->numpending--;
					list_del(&fibptr->queue);
					spin_unlock_irqrestore(q->lock, qflags);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update mother board BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}
				udelay(5);
			}
		} else
			down(&fibptr->event_wait);
		if (fibptr->done == 0)
			BUG();

		if ((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			return -ETIMEDOUT;
		} else {
			return 0;
		}
	}
	/*
	 * If the user does not want a response then return success,
	 * otherwise return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}

/**
 * aac_consumer_get - get the top of the queue
 * @dev: Adapter
 * @q: Queue
 * @entry: Return entry
 *
 * Returns a pointer to the entry on the top of the requested queue that
 * we are a consumer of, and returns the address of the queue entry. It
 * does not change the state of the queue.
 */

int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;

	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 * The consumer index must be wrapped if we have reached
		 * the end of the queue, else we just use the entry
		 * pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return status;
}

/**
 * aac_consumer_free - free consumer entry
 * @dev: Adapter
 * @q: Queue
 * @qid: Queue ident
 *
 * Frees up the current top of the queue we are a consumer of. If the
 * queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer) + 1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer) + 1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}

/**
 * fib_adapter_complete - complete adapter issued fib
 * @fibptr: fib to complete
 * @size: size of fib
 *
 * Will do all necessary work to complete a FIB that was sent from
 * the adapter.
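 *
 * With the new comm interface the hw_fib is simply kfree'd here;
 * otherwise the FIB is handed back to the adapter on the
 * AdapNormRespQueue.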
 */

int fib_adapter_complete(struct fib * fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_dev * dev = fibptr->dev;
	struct aac_queue * q;
	unsigned long nointr = 0;
	unsigned long qflags;

	if (hw_fib->header.XferState == 0) {
		if (dev->new_comm_interface)
			kfree(hw_fib);
		return 0;
	}
	/*
	 * If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC) {
		if (dev->new_comm_interface)
			kfree(hw_fib);
		return -EINVAL;
	}
	/*
	 * This block handles the case where the adapter had sent us a
	 * command and we have finished processing the command. We
	 * call completeFib when we are done processing the command
	 * and want to send a response back to the adapter. This will
	 * send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->new_comm_interface) {
			kfree(hw_fib);
		} else {
			u32 index;
			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	} else {
		printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}

/**
 * fib_complete - fib completion handler
 * @fibptr: FIB to complete
 *
 * Will do all necessary work to complete a FIB.
 */

int fib_complete(struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;

	/*
	 * Check for a fib which has already been completed
	 */

	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 * If we plan to do anything check the structure type first.
	 */

	if (hw_fib->header.StructType != FIB_MAGIC)
		return -EINVAL;
	/*
	 * This block completes a cdb which originated on the host and we
	 * just need to deallocate the cdb or reinit it. At this point the
	 * command is complete that we had sent to the adapter and this
	 * cdb could be reused.
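	 * Three states are accepted below: SentFromHost|AdapterProcessed
	 * (normal completion), SentFromHost alone (host aborted the I/O),
	 * and plain HostOwned; anything else is a BUG.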
	 */
	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) {
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost)) {
		/*
		 * This handles the case when the host has aborted the I/O
		 * to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}

/**
 * aac_printf - handle printf from firmware
 * @dev: Adapter
 * @val: Message info
 *
 * Print a message passed to us by the controller firmware on the
 * Adaptec board
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;

	if (dev->printf_enabled) {
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 * The size of the printfbuf is set in port.c
		 * There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "aacraid:%s", cp);
		else
			printk(KERN_INFO "aacraid:%s", cp);
	}
	memset(cp, 0, 256);
}


/**
 * aac_handle_aif - Handle a message from the firmware
 * @dev: Which adapter this fib is from
 * @fibptr: Pointer to fibptr from adapter
 *
 * This routine handles a driver notify fib from the adapter and
 * dispatches it to the appropriate routine for handling.
 */

static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
	int busy;
	u32 container;
	struct scsi_device *device;
	enum {
		NOTHING,
		DELETE,
		ADD,
		CHANGE
	} device_config_needed;

	/* Sniff for container changes */

	if (!dev)
		return;
	container = (u32)-1;

	/*
	 * We have set this up to try and minimize the number of
	 * re-configures that take place. As a result of this when
	 * certain AIF's come in we will set a flag waiting for another
	 * type of AIF before setting the re-config flag.
	 */
	switch (le32_to_cpu(aifcmd->command)) {
	case AifCmdDriverNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 * Morph or Expand complete
		 */
		case AifDenMorphComplete:
		case AifDenVolumeExtendComplete:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;

			/*
			 * Find the Scsi_Device associated with the SCSI
			 * address. Make sure we have the right array, and if
			 * so set the flag to initiate a new re-config once we
			 * see an AifEnConfigChange AIF come through.
			 */

			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
				if (device) {
					dev->fsa_dev[container].config_needed = CHANGE;
					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
					scsi_device_put(device);
				}
			}
		}

		/*
		 * If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
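		 * (config_waiting_on records the AIF type we still expect
		 * for that container; it is cleared once that AIF arrives.)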
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if (dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdEventNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 * Add an Array.
		 */
		case AifEnAddContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = ADD;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			break;

		/*
		 * Delete an Array.
		 */
		case AifEnDeleteContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = DELETE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			break;

		/*
		 * Container change detected. If we currently are not
		 * waiting on something else, setup to wait on a Config Change.
		 */
		case AifEnContainerChange:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on)
				break;
			dev->fsa_dev[container].config_needed = CHANGE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			break;

		case AifEnConfigChange:
			break;

		}

		/*
		 * If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if (dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdJobProgress:
		/*
		 * These are job progress AIF's. When a Clear is being
		 * done on a container it is initially created then hidden from
		 * the OS. When the clear completes we don't get a config
		 * change so we monitor the job status complete on a clear then
		 * wait for a container change.
		 */

		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
		  || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = ADD;
			}
		}
		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && (((u32 *)aifcmd->data)[6] == 0)
		 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
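				 * (A running zero job means the container is
				 * about to vanish from the OS view, hence
				 * DELETE here and ADD when it completes above.)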
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = DELETE;
			}
		}
		break;
	}

	device_config_needed = NOTHING;
	for (container = 0; container < dev->maximum_num_containers;
	    ++container) {
		if ((dev->fsa_dev[container].config_waiting_on == 0)
		 && (dev->fsa_dev[container].config_needed != NOTHING)) {
			device_config_needed =
				dev->fsa_dev[container].config_needed;
			dev->fsa_dev[container].config_needed = NOTHING;
			break;
		}
	}
	if (device_config_needed == NOTHING)
		return;

	/*
	 * If we decided that a re-configuration needs to be done,
	 * schedule it here on the way out the door, please close the door
	 * behind you.
	 */

	busy = 0;

	/*
	 * Find the Scsi_Device associated with the SCSI address,
	 * and mark it as changed, invalidating the cache. This deals
	 * with changes to existing device IDs.
	 */

	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 * force reload of disk info via probe_container
	 */
	if ((device_config_needed == CHANGE)
	 && (dev->fsa_dev[container].valid == 1))
		dev->fsa_dev[container].valid = 2;
	if ((device_config_needed == CHANGE) ||
			(device_config_needed == ADD))
		probe_container(dev, container);
	device = scsi_device_lookup(dev->scsi_host_ptr,
		CONTAINER_TO_CHANNEL(container),
		CONTAINER_TO_ID(container),
		CONTAINER_TO_LUN(container));
	if (device) {
		switch (device_config_needed) {
		case DELETE:
			scsi_remove_device(device);
			break;
		case CHANGE:
			if (!dev->fsa_dev[container].valid) {
				scsi_remove_device(device);
				break;
			}
			scsi_rescan_device(&device->sdev_gendev);

		default:
			break;
		}
		scsi_device_put(device);
	}
	if (device_config_needed == ADD) {
		scsi_add_device(dev->scsi_host_ptr,
		  CONTAINER_TO_CHANNEL(container),
		  CONTAINER_TO_ID(container),
		  CONTAINER_TO_LUN(container));
	}

}

/**
 * aac_command_thread - command processing thread
 * @dev: Adapter to monitor
 *
 * Waits on the commandready event in its queue. When the event gets set
 * it will pull FIBs off its queue. It will continue to pull FIBs off
 * until the queue is empty. When the queue is empty it will wait for
 * more FIBs.
 */

int aac_command_thread(struct aac_dev * dev)
{
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;
	/*
	 * Set up the name that will appear in 'ps'
	 * stored in task_struct.comm[16].
	 */
	daemonize("aacraid");
	allow_signal(SIGKILL);
	/*
	 * Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk((KERN_INFO "aac_command_thread start\n"));
	while (1) {
		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd * aifcmd;

			set_current_state(TASK_RUNNING);

			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);

			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 * We will process the FIB here or pass it to a
			 * worker thread that is TBD. We really can't
			 * do anything at this point since we don't have
			 * anything defined for this thread to do.
			 */
			hw_fib = fib->hw_fib;
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->hw_fib = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 * We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				aac_handle_aif(dev, fib);
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				fib_adapter_complete(fib, (u16)sizeof(u32));
			} else {
				struct list_head *entry;
				/* The u32 here is important and intended. We are using
				   32bit wrapping time to fit the adapter field */

				u32 time_now, time_last;
				unsigned long flagv;
				unsigned num;
				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
				struct fib ** fib_pool, ** fib_p;

				/* Sniff events */
				if ((aifcmd->command ==
				     cpu_to_le32(AifCmdEventNotify)) ||
				    (aifcmd->command ==
				     cpu_to_le32(AifCmdJobProgress))) {
					aac_handle_aif(dev, fib);
				}

				time_now = jiffies/HZ;

				/*
				 * Warning: no sleep allowed while
				 * holding spinlock. We take the estimate
				 * and pre-allocate a set of fibs outside the
				 * lock.
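				 * The estimate is AdapterFibsSize divided by
				 * sizeof(struct hw_fib), plus one for every
				 * fib context currently on dev->fib_list.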
				 */
				num = le32_to_cpu(dev->init->AdapterFibsSize)
				    / sizeof(struct hw_fib); /* some extra */
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				while (entry != &dev->fib_list) {
					entry = entry->next;
					++num;
				}
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				hw_fib_pool = NULL;
				fib_pool = NULL;
				if (num
				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
					hw_fib_p = hw_fib_pool;
					fib_p = fib_pool;
					while (hw_fib_p < &hw_fib_pool[num]) {
						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
							--hw_fib_p;
							break;
						}
						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
							kfree(*(--hw_fib_p));
							break;
						}
					}
					if ((num = hw_fib_p - hw_fib_pool) == 0) {
						kfree(fib_pool);
						fib_pool = NULL;
						kfree(hw_fib_pool);
						hw_fib_pool = NULL;
					}
				} else if (hw_fib_pool) {
					kfree(hw_fib_pool);
					hw_fib_pool = NULL;
				}
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 * For each Context that is on the
				 * fibctxList, make a copy of the
				 * fib, and then set the event to wake up the
				 * thread that is waiting for it.
				 */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (entry != &dev->fib_list) {
					/*
					 * Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 * Check if the queue is getting
					 * backlogged
					 */
					if (fibctx->count > 20) {
						/*
						 * It's *not* jiffies folks,
						 * but jiffies / HZ so do not
						 * panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 * Has it been > 2 minutes
						 * since the last read off
						 * the queue?
						 */
						if ((time_now - time_last) > 120) {
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 * Warning: no sleep allowed while
					 * holding spinlock
					 */
					if (hw_fib_p < &hw_fib_pool[num]) {
						hw_newfib = *hw_fib_p;
						*(hw_fib_p++) = NULL;
						newfib = *fib_p;
						*(fib_p++) = NULL;
						/*
						 * Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib = hw_newfib;
						/*
						 * Put the FIB onto the
						 * fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 * Set the event to wake up the
						 * thread that is waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
					}
					entry = entry->next;
				}
				/*
				 * Set the status of this FIB
				 */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				/* Free up the remaining resources */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (hw_fib_p < &hw_fib_pool[num]) {
					if (*hw_fib_p)
						kfree(*hw_fib_p);
					if (*fib_p)
						kfree(*fib_p);
					++fib_p;
					++hw_fib_p;
				}
				if (hw_fib_pool)
					kfree(hw_fib_pool);
				if (fib_pool)
					kfree(fib_pool);
			}
			kfree(fib);
			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		}
		/*
		 * There are no more AIF's
		 */
		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
		schedule();

		if (signal_pending(current))
			break;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	complete_and_exit(&dev->aif_completion, 0);
	return 0;
}