commsup.c revision a8166a52968216ae079a5530ac3269147de2ef31
1/*
2 *	Adaptec AAC series RAID controller driver
3 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
4 *
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING.  If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Module Name:
25 *  commsup.c
26 *
27 * Abstract: Contains all routines required for FSA host/adapter
28 *    communication.
29 *
30 */
31
32#include <linux/kernel.h>
33#include <linux/init.h>
34#include <linux/types.h>
35#include <linux/sched.h>
36#include <linux/pci.h>
37#include <linux/spinlock.h>
38#include <linux/slab.h>
39#include <linux/completion.h>
40#include <linux/blkdev.h>
41#include <linux/delay.h>
42#include <linux/kthread.h>
43#include <linux/interrupt.h>
44#include <scsi/scsi.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_device.h>
47#include <scsi/scsi_cmnd.h>
48#include <asm/semaphore.h>
49
50#include "aacraid.h"
51
52/**
53 *	fib_map_alloc		-	allocate the fib objects
54 *	@dev: Adapter to allocate for
55 *
56 *	Allocate and map the shared PCI space for the FIB blocks used to
57 *	talk to the Adaptec firmware.
58 */
59
60static int fib_map_alloc(struct aac_dev *dev)
61{
62	dprintk((KERN_INFO
63	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
64	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
65	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
66	if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
67	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
68	  &dev->hw_fib_pa))==NULL)
69		return -ENOMEM;
70	return 0;
71}
72
73/**
74 *	aac_fib_map_free		-	free the fib objects
75 *	@dev: Adapter to free
76 *
77 *	Free the PCI mappings and the memory allocated for FIB blocks
78 *	on this adapter.
79 */
80
81void aac_fib_map_free(struct aac_dev *dev)
82{
83	pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa);
84}
85
86/**
87 *	aac_fib_setup	-	setup the fibs
88 *	@dev: Adapter to set up
89 *
90 *	Allocate the PCI space for the fibs, map it and then initialise the
91 *	fib area, the unmapped fib data and also the free list
92 */
93
94int aac_fib_setup(struct aac_dev * dev)
95{
96	struct fib *fibptr;
97	struct hw_fib *hw_fib;
98	dma_addr_t hw_fib_pa;
99	int i;
100
101	while (((i = fib_map_alloc(dev)) == -ENOMEM)
102	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
103		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
104		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
105	}
106	if (i<0)
107		return -ENOMEM;
108
109	hw_fib = dev->hw_fib_va;
110	hw_fib_pa = dev->hw_fib_pa;
111	memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
112	/*
113	 *	Initialise the fibs
114	 */
115	for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
116	{
117		fibptr->dev = dev;
118		fibptr->hw_fib_va = hw_fib;
119		fibptr->data = (void *) fibptr->hw_fib_va->data;
120		fibptr->next = fibptr+1;	/* Forward chain the fibs */
121		init_MUTEX_LOCKED(&fibptr->event_wait);
122		spin_lock_init(&fibptr->event_lock);
123		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
124		hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
125		fibptr->hw_fib_pa = hw_fib_pa;
126		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_fib_size);
127		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
128	}
129	/*
130	 *	Add the fib chain to the free list
131	 */
132	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
133	/*
134	 *	Enable this to debug out of queue space
135	 */
136	dev->free_fib = &dev->fibs[0];
137	return 0;
138}
139
140/**
141 *	aac_fib_alloc	-	allocate a fib
142 *	@dev: Adapter to allocate the fib for
143 *
144 *	Allocate a fib from the adapter fib pool. If the pool is empty we
145 *	return NULL.
146 */
147
148struct fib *aac_fib_alloc(struct aac_dev *dev)
149{
150	struct fib * fibptr;
151	unsigned long flags;
152	spin_lock_irqsave(&dev->fib_lock, flags);
153	fibptr = dev->free_fib;
154	if(!fibptr){
155		spin_unlock_irqrestore(&dev->fib_lock, flags);
156		return fibptr;
157	}
158	dev->free_fib = fibptr->next;
159	spin_unlock_irqrestore(&dev->fib_lock, flags);
160	/*
161	 *	Set the proper node type code and node byte size
162	 */
163	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
164	fibptr->size = sizeof(struct fib);
165	/*
166	 *	Null out fields that depend on being zero at the start of
167	 *	each I/O
168	 */
169	fibptr->hw_fib_va->header.XferState = 0;
170	fibptr->callback = NULL;
171	fibptr->callback_data = NULL;
172
173	return fibptr;
174}
175
176/**
177 *	aac_fib_free	-	free a fib
178 *	@fibptr: fib to free up
179 *
180 *	Frees up a fib and places it on the appropriate queue
181 *	(either free or timed out)
182 */
183
184void aac_fib_free(struct fib *fibptr)
185{
186	unsigned long flags;
187
188	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
189	if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
190		aac_config.fib_timeouts++;
191		fibptr->next = fibptr->dev->timeout_fib;
192		fibptr->dev->timeout_fib = fibptr;
193	} else {
194		if (fibptr->hw_fib_va->header.XferState != 0) {
195			printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
196				 (void*)fibptr,
197				 le32_to_cpu(fibptr->hw_fib_va->header.XferState));
198		}
199		fibptr->next = fibptr->dev->free_fib;
200		fibptr->dev->free_fib = fibptr;
201	}
202	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
203}
204
205/**
206 *	aac_fib_init	-	initialise a fib
207 *	@fibptr: The fib to initialize
208 *
209 *	Set up the generic fib fields ready for use
210 */
211
212void aac_fib_init(struct fib *fibptr)
213{
214	struct hw_fib *hw_fib = fibptr->hw_fib_va;
215
216	hw_fib->header.StructType = FIB_MAGIC;
217	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
218	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
219	hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
220	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
221	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
222}
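
/*
 *	Illustrative sketch, not part of this revision: the usual caller-side
 *	lifecycle built from the routines in this file (alloc, init, send,
 *	complete, free).  The command, payload struct and payload size below
 *	are placeholders loosely modelled on the container query path; real
 *	callers live in aachba.c and related files.
 */
static int example_fib_send_sync(struct aac_dev *dev)
{
	struct fib *fibptr;
	int status;

	fibptr = aac_fib_alloc(dev);		/* take a fib off the free list */
	if (!fibptr)
		return -ENOMEM;
	aac_fib_init(fibptr);			/* fill in the generic header */
	/* ... build the command payload in fib_data(fibptr) ... */
	status = aac_fib_send(ContainerCommand,	/* example command */
			fibptr,
			sizeof(struct aac_query_mount),	/* payload size */
			FsaNormal,		/* priority */
			1,			/* wait for the reply */
			1,			/* a reply is expected */
			NULL, NULL);		/* no callback when waiting */
	if (status >= 0) {
		/* ... examine the response in fib_data(fibptr) ... */
		aac_fib_complete(fibptr);	/* mark the hw fib done */
	}
	aac_fib_free(fibptr);			/* return the fib to the free list */
	return status;
}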
223
224/**
225 *	fib_deallocate		-	deallocate a fib
226 *	@fibptr: fib to deallocate
227 *
228 *	Will deallocate the FIB pointed to by the caller and return it to
229 *	the free pool.
230 */
231
232static void fib_dealloc(struct fib * fibptr)
233{
234	struct hw_fib *hw_fib = fibptr->hw_fib_va;
235	BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
236	hw_fib->header.XferState = 0;
237}
238
239/*
240 *	Communication primitives define and support the queuing method we use to
241 *	support host to adapter communication. All queue accesses happen through
242 *	these routines and they are the only routines which have knowledge of
243 *	how these queues are implemented.
244 */
245
246/**
247 *	aac_get_entry		-	get a queue entry
248 *	@dev: Adapter
249 *	@qid: Queue Number
250 *	@entry: Entry return
251 *	@index: Index return
252 *	@nonotify: notification control
253 *
254 *	With a priority the routine returns a queue entry if the queue has free
255 *	entries. If the queue is full (no free entries) no entry is returned and
256 *	the function returns 0, otherwise 1 is returned.
257 */
258
259static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
260{
261	struct aac_queue * q;
262	unsigned long idx;
263
264	/*
265	 *	All of the queues wrap when they reach the end, so we check
266	 *	to see if they have reached the end and if they have we just
267	 *	set the index back to zero. This is a wrap. You could or off
268	 *	the high bits in all updates but this is a bit faster I think.
269	 */
270
271	q = &dev->queues->queue[qid];
272
273	idx = *index = le32_to_cpu(*(q->headers.producer));
274	/* Interrupt Moderation, only interrupt for first two entries */
275	if (idx != le32_to_cpu(*(q->headers.consumer))) {
276		if (--idx == 0) {
277			if (qid == AdapNormCmdQueue)
278				idx = ADAP_NORM_CMD_ENTRIES;
279			else
280				idx = ADAP_NORM_RESP_ENTRIES;
281		}
282		if (idx != le32_to_cpu(*(q->headers.consumer)))
283			*nonotify = 1;
284	}
285
286	if (qid == AdapNormCmdQueue) {
287	        if (*index >= ADAP_NORM_CMD_ENTRIES)
288			*index = 0; /* Wrap to front of the Producer Queue. */
289	} else {
290		if (*index >= ADAP_NORM_RESP_ENTRIES)
291			*index = 0; /* Wrap to front of the Producer Queue. */
292	}
293
294        if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
295		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
296				qid, q->numpending);
297		return 0;
298	} else {
299	        *entry = q->base + *index;
300		return 1;
301	}
302}
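
/*
 *	Illustrative sketch, not part of this revision: the queue-full test
 *	used above, pulled out into a hypothetical helper.  With a producer
 *	index of 5 and a consumer index of 6, (5 + 1) == 6, so the queue is
 *	reported full; one slot is always left unused so that
 *	producer == consumer can unambiguously mean "empty".
 */
static inline int example_queue_is_full(u32 producer, u32 consumer)
{
	/* Full when the slot we are about to fill sits immediately
	 * behind the consumer's current position. */
	return (producer + 1) == consumer;
}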
303
304/**
305 *	aac_queue_get		-	get the next free QE
306 *	@dev: Adapter
307 *	@index: Returned index
308 *	@priority: Priority of fib
309 *	@fib: Fib to associate with the queue entry
310 *	@wait: Wait if queue full
311 *	@fibptr: Driver fib object to go with fib
312 *	@nonotify: Don't notify the adapter
313 *
314 *	Gets the next free QE off the requested priority adapter command
315 *	queue and associates the Fib with the QE. The QE represented by
316 *	index is ready to insert on the queue when this routine returns
317 *	success.
318 */
319
320int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
321{
322	struct aac_entry * entry = NULL;
323	int map = 0;
324
325	if (qid == AdapNormCmdQueue) {
326		/*  if no entries wait for some if caller wants to */
327        	while (!aac_get_entry(dev, qid, &entry, index, nonotify))
328        	{
329			printk(KERN_ERR "GetEntries failed\n");
330		}
331	        /*
332	         *	Setup queue entry with a command, status and fib mapped
333	         */
334	        entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
335	        map = 1;
336	} else {
337	        while(!aac_get_entry(dev, qid, &entry, index, nonotify))
338	        {
339			/* if no entries wait for some if caller wants to */
340		}
341        	/*
342        	 *	Setup queue entry with command, status and fib mapped
343        	 */
344        	entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
345        	entry->addr = hw_fib->header.SenderFibAddress;
346     			/* Restore the adapter's pointer to the FIB */
347		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
348        	map = 0;
349	}
350	/*
351	 *	If MapFib is true then we need to map the Fib and put pointers
352	 *	in the queue entry.
353	 */
354	if (map)
355		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
356	return 0;
357}
358
359/*
360 *	Define the highest level of host to adapter communication routines.
361 *	These routines will support host to adapter FS communication. These
362 *	routines have no knowledge of the communication method used. This level
363 *	sends and receives FIBs. This level has no knowledge of how these FIBs
364 *	get passed back and forth.
365 */
366
367/**
368 *	aac_fib_send	-	send a fib to the adapter
369 *	@command: Command to send
370 *	@fibptr: The fib
371 *	@size: Size of fib data area
372 *	@priority: Priority of Fib
373 *	@wait: Async/sync select
374 *	@reply: True if a reply is wanted
375 *	@callback: Called with reply
376 *	@callback_data: Passed to callback
377 *
378 *	Sends the requested FIB to the adapter and optionally will wait for a
379 *	response FIB. If the caller does not wish to wait for a response than
380 *	response FIB. If the caller does not wish to wait, a callback and
381 *	callback_data must be supplied instead; the callback is invoked when a
382 *	response FIB is received from the adapter.
383
384int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
385		int priority, int wait, int reply, fib_callback callback,
386		void *callback_data)
387{
388	struct aac_dev * dev = fibptr->dev;
389	struct hw_fib * hw_fib = fibptr->hw_fib_va;
390	unsigned long flags = 0;
391	unsigned long qflags;
392
393	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
394		return -EBUSY;
395	/*
396	 *	There are 5 cases with the wait and response requested flags.
397	 *	The only invalid cases are if the caller requests to wait and
398	 *	does not request a response and if the caller does not want a
399	 *	response and the Fib is not allocated from pool. If a response
400	 *	is not requested the Fib will just be deallocated by the DPC
401	 *	routine when the response comes back from the adapter. No
402	 *	further processing will be done besides deleting the Fib. We
403	 *	will have a debug mode where the adapter can notify the host
404	 *	it had a problem and the host can log that fact.
405	 */
406	if (wait && !reply) {
407		return -EINVAL;
408	} else if (!wait && reply) {
409		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
410		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
411	} else if (!wait && !reply) {
412		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
413		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
414	} else if (wait && reply) {
415		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
416		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
417	}
418	/*
419	 *	Map the fib into 32bits by using the fib number
420	 */
421
422	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
423	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
424	/*
425	 *	Set FIB state to indicate where it came from and if we want a
426	 *	response from the adapter. Also load the command from the
427	 *	caller.
428	 *
429	 *	Map the hw fib pointer as a 32bit value
430	 */
431	hw_fib->header.Command = cpu_to_le16(command);
432	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
433	fibptr->hw_fib_va->header.Flags = 0;	/* 0 the flags field - internal only*/
434	/*
435	 *	Set the size of the Fib we want to send to the adapter
436	 */
437	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
438	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
439		return -EMSGSIZE;
440	}
441	/*
442	 *	Get a queue entry, connect the FIB to it and send a notify to
443	 *	the adapter that a command is ready.
444	 */
445	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
446
447	/*
448	 *	Fill in the Callback and CallbackContext if we are not
449	 *	going to wait.
450	 */
451	if (!wait) {
452		fibptr->callback = callback;
453		fibptr->callback_data = callback_data;
454	}
455
456	fibptr->done = 0;
457	fibptr->flags = 0;
458
459	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
460
461	dprintk((KERN_DEBUG "Fib contents:.\n"));
462	dprintk((KERN_DEBUG "  Command =               %d.\n", le32_to_cpu(hw_fib->header.Command)));
463	dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
464	dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
465	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib_va));
466	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
467	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
468
469	if (!dev->queues)
470		return -EBUSY;
471
472	if(wait)
473		spin_lock_irqsave(&fibptr->event_lock, flags);
474	aac_adapter_deliver(fibptr);
475
476	/*
477	 *	If the caller wanted us to wait for response wait now.
478	 */
479
480	if (wait) {
481		spin_unlock_irqrestore(&fibptr->event_lock, flags);
482		/* Only set for first known interruptible command */
483		if (wait < 0) {
484			/*
485			 * *VERY* Dangerous to time out a command, the
486			 * assumption is made that we have no hope of
487			 * functioning because an interrupt routing or other
488			 * hardware failure has occurred.
489			 */
490			unsigned long count = 36000000L; /* 3 minutes */
491			while (down_trylock(&fibptr->event_wait)) {
492				int blink;
493				if (--count == 0) {
494					struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
495					spin_lock_irqsave(q->lock, qflags);
496					q->numpending--;
497					spin_unlock_irqrestore(q->lock, qflags);
498					if (wait == -1) {
499	        				printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
500						  "Usually a result of a PCI interrupt routing problem;\n"
501						  "update mother board BIOS or consider utilizing one of\n"
502						  "the SAFE mode kernel options (acpi, apic etc)\n");
503					}
504					return -ETIMEDOUT;
505				}
506				if ((blink = aac_adapter_check_health(dev)) > 0) {
507					if (wait == -1) {
508	        				printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
509						  "Usually a result of a serious unrecoverable hardware problem\n",
510						  blink);
511					}
512					return -EFAULT;
513				}
514				udelay(5);
515			}
516		} else if (down_interruptible(&fibptr->event_wait)) {
517			spin_lock_irqsave(&fibptr->event_lock, flags);
518			if (fibptr->done == 0) {
519				fibptr->done = 2; /* Tell interrupt we aborted */
520				spin_unlock_irqrestore(&fibptr->event_lock, flags);
521				return -EINTR;
522			}
523			spin_unlock_irqrestore(&fibptr->event_lock, flags);
524		}
525		BUG_ON(fibptr->done == 0);
526
527		if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
528			return -ETIMEDOUT;
529		} else {
530			return 0;
531		}
532	}
533	/*
534	 *	If the user does not want a response then return success, otherwise
535	 *	return pending
536	 */
537	if (reply)
538		return -EINPROGRESS;
539	else
540		return 0;
541}
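
/*
 *	Illustrative sketch, not part of this revision: an asynchronous send.
 *	With wait == 0 a callback and its context are supplied instead, the
 *	routine returns -EINPROGRESS, and the callback is invoked from the
 *	response handling path when the adapter answers.  The command,
 *	payload and callback body are placeholders.
 */
static void example_async_callback(void *context, struct fib *fibptr)
{
	/* ... inspect fib_data(fibptr), then retire the fib ... */
	aac_fib_complete(fibptr);
	aac_fib_free(fibptr);
}

static int example_fib_send_async(struct aac_dev *dev)
{
	struct fib *fibptr = aac_fib_alloc(dev);

	if (!fibptr)
		return -ENOMEM;
	aac_fib_init(fibptr);
	/* ... build the command payload in fib_data(fibptr) ... */
	return aac_fib_send(ContainerCommand,	/* example command */
			fibptr,
			sizeof(struct aac_query_mount),	/* payload size */
			FsaNormal,
			0,			/* do not wait */
			1,			/* but expect a reply */
			example_async_callback,	/* run on completion */
			dev);			/* callback_data */
}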
542
543/**
544 *	aac_consumer_get	-	get the top of the queue
545 *	@dev: Adapter
546 *	@q: Queue
547 *	@entry: Return entry
548 *
549 *	Will return a pointer to the entry on the top of the requested queue
550 * 	that we are a consumer of. It does
551 *	not change the state of the queue.
552 */
553
554int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
555{
556	u32 index;
557	int status;
558	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
559		status = 0;
560	} else {
561		/*
562		 *	The consumer index must be wrapped if we have reached
563		 *	the end of the queue, else we just use the entry
564		 *	pointed to by the header index
565		 */
566		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
567			index = 0;
568		else
569		        index = le32_to_cpu(*q->headers.consumer);
570		*entry = q->base + index;
571		status = 1;
572	}
573	return(status);
574}
575
576/**
577 *	aac_consumer_free	-	free consumer entry
578 *	@dev: Adapter
579 *	@q: Queue
580 *	@qid: Queue ident
581 *
582 *	Frees up the current top of the queue we are a consumer of. If the
583 *	queue was full notify the producer that the queue is no longer full.
584 */
585
586void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
587{
588	int wasfull = 0;
589	u32 notify;
590
591	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
592		wasfull = 1;
593
594	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
595		*q->headers.consumer = cpu_to_le32(1);
596	else
597		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
598
599	if (wasfull) {
600		switch (qid) {
601
602		case HostNormCmdQueue:
603			notify = HostNormCmdNotFull;
604			break;
605		case HostNormRespQueue:
606			notify = HostNormRespNotFull;
607			break;
608		default:
609			BUG();
610			return;
611		}
612		aac_adapter_notify(dev, notify);
613	}
614}
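
/*
 *	Illustrative sketch, not part of this revision: how a consumer of a
 *	host queue typically pairs the two routines above (the real response
 *	and command loops live in dpcsup.c).  Queue locking and the per-entry
 *	FIB handling are omitted; only the get/advance pairing is shown.
 */
static void example_drain_queue(struct aac_dev *dev, u32 qid)
{
	struct aac_queue *q = &dev->queues->queue[qid];
	struct aac_entry *entry;

	while (aac_consumer_get(dev, q, &entry)) {
		/* ... look up the fib referenced by entry->addr and
		 *     complete it ... */
		aac_consumer_free(dev, q, qid);	/* advance the consumer index;
						 * notify the producer if the
						 * queue had been full */
	}
}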
615
616/**
617 *	aac_fib_adapter_complete	-	complete adapter issued fib
618 *	@fibptr: fib to complete
619 *	@size: size of fib
620 *
621 *	Will do all necessary work to complete a FIB that was sent from
622 *	the adapter.
623 */
624
625int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
626{
627	struct hw_fib * hw_fib = fibptr->hw_fib_va;
628	struct aac_dev * dev = fibptr->dev;
629	struct aac_queue * q;
630	unsigned long nointr = 0;
631	unsigned long qflags;
632
633	if (hw_fib->header.XferState == 0) {
634		if (dev->comm_interface == AAC_COMM_MESSAGE)
635			kfree (hw_fib);
636        	return 0;
637	}
638	/*
639	 *	If we plan to do anything check the structure type first.
640	 */
641	if ( hw_fib->header.StructType != FIB_MAGIC ) {
642		if (dev->comm_interface == AAC_COMM_MESSAGE)
643			kfree (hw_fib);
644        	return -EINVAL;
645	}
646	/*
647	 *	This block handles the case where the adapter had sent us a
648	 *	command and we have finished processing the command. We
649	 *	call completeFib when we are done processing the command
650	 *	and want to send a response back to the adapter. This will
651	 *	send the completed cdb to the adapter.
652	 */
653	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
654		if (dev->comm_interface == AAC_COMM_MESSAGE) {
655			kfree (hw_fib);
656		} else {
657	       		u32 index;
658		        hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
659			if (size) {
660				size += sizeof(struct aac_fibhdr);
661				if (size > le16_to_cpu(hw_fib->header.SenderSize))
662					return -EMSGSIZE;
663				hw_fib->header.Size = cpu_to_le16(size);
664			}
665			q = &dev->queues->queue[AdapNormRespQueue];
666			spin_lock_irqsave(q->lock, qflags);
667			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
668			*(q->headers.producer) = cpu_to_le32(index + 1);
669			spin_unlock_irqrestore(q->lock, qflags);
670			if (!(nointr & (int)aac_config.irq_mod))
671				aac_adapter_notify(dev, AdapNormRespQueue);
672		}
673	}
674	else
675	{
676        	printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
677        	BUG();
678	}
679	return 0;
680}
681
682/**
683 *	aac_fib_complete	-	fib completion handler
684 *	@fib: FIB to complete
685 *
686 *	Will do all necessary work to complete a FIB.
687 */
688
689int aac_fib_complete(struct fib *fibptr)
690{
691	struct hw_fib * hw_fib = fibptr->hw_fib_va;
692
693	/*
694	 *	Check for a fib which has already been completed
695	 */
696
697	if (hw_fib->header.XferState == 0)
698        	return 0;
699	/*
700	 *	If we plan to do anything check the structure type first.
701	 */
702
703	if (hw_fib->header.StructType != FIB_MAGIC)
704	        return -EINVAL;
705	/*
706	 *	This block completes a cdb which originated on the host and we
707	 *	just need to deallocate the cdb or reinit it. At this point the
708	 *	command is complete that we had sent to the adapter and this
709	 *	cdb could be reused.
710	 */
711	if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
712		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
713	{
714		fib_dealloc(fibptr);
715	}
716	else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
717	{
718		/*
719		 *	This handles the case when the host has aborted the I/O
720		 *	to the adapter because the adapter is not responding
721		 */
722		fib_dealloc(fibptr);
723	} else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
724		fib_dealloc(fibptr);
725	} else {
726		BUG();
727	}
728	return 0;
729}
730
731/**
732 *	aac_printf	-	handle printf from firmware
733 *	@dev: Adapter
734 *	@val: Message info
735 *
736 *	Print a message passed to us by the controller firmware on the
737 *	Adaptec board
738 */
739
740void aac_printf(struct aac_dev *dev, u32 val)
741{
742	char *cp = dev->printfbuf;
743	if (dev->printf_enabled)
744	{
745		int length = val & 0xffff;
746		int level = (val >> 16) & 0xffff;
747
748		/*
749		 *	The size of the printfbuf is set in port.c
750		 *	There is no variable or define for it
751		 */
752		if (length > 255)
753			length = 255;
754		if (cp[length] != 0)
755			cp[length] = 0;
756		if (level == LOG_AAC_HIGH_ERROR)
757			printk(KERN_WARNING "%s:%s", dev->name, cp);
758		else
759			printk(KERN_INFO "%s:%s", dev->name, cp);
760	}
761	memset(cp, 0,  256);
762}
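
/*
 *	Illustrative sketch, not part of this revision: the layout of the
 *	32 bit value decoded by aac_printf above.  The low 16 bits carry the
 *	length of the message the adapter placed in dev->printfbuf, the high
 *	16 bits carry the log level compared against LOG_AAC_HIGH_ERROR.
 */
static inline u32 example_pack_printf_val(u16 level, u16 length)
{
	return ((u32)level << 16) | length;	/* inverse of the decode above */
}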
763
764
765/**
766 *	aac_handle_aif		-	Handle a message from the firmware
767 *	@dev: Which adapter this fib is from
768 *	@fibptr: Pointer to fibptr from adapter
769 *
770 *	This routine handles a driver notify fib from the adapter and
771 *	dispatches it to the appropriate routine for handling.
772 */
773
774#define AIF_SNIFF_TIMEOUT	(30*HZ)
775static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
776{
777	struct hw_fib * hw_fib = fibptr->hw_fib_va;
778	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
779	u32 container;
780	struct scsi_device *device;
781	enum {
782		NOTHING,
783		DELETE,
784		ADD,
785		CHANGE
786	} device_config_needed;
787
788	/* Sniff for container changes */
789
790	if (!dev || !dev->fsa_dev)
791		return;
792	container = (u32)-1;
793
794	/*
795	 *	We have set this up to try and minimize the number of
796	 * re-configures that take place. As a result of this when
797	 * certain AIF's come in we will set a flag waiting for another
798	 * type of AIF before setting the re-config flag.
799	 */
800	switch (le32_to_cpu(aifcmd->command)) {
801	case AifCmdDriverNotify:
802		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
803		/*
804		 *	Morph or Expand complete
805		 */
806		case AifDenMorphComplete:
807		case AifDenVolumeExtendComplete:
808			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
809			if (container >= dev->maximum_num_containers)
810				break;
811
812			/*
813			 *	Find the scsi_device associated with the SCSI
814			 * address. Make sure we have the right array, and if
815			 * so set the flag to initiate a new re-config once we
816			 * see an AifEnConfigChange AIF come through.
817			 */
818
819			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
820				device = scsi_device_lookup(dev->scsi_host_ptr,
821					CONTAINER_TO_CHANNEL(container),
822					CONTAINER_TO_ID(container),
823					CONTAINER_TO_LUN(container));
824				if (device) {
825					dev->fsa_dev[container].config_needed = CHANGE;
826					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
827					dev->fsa_dev[container].config_waiting_stamp = jiffies;
828					scsi_device_put(device);
829				}
830			}
831		}
832
833		/*
834		 *	If we are waiting on something and this happens to be
835		 * that thing then set the re-configure flag.
836		 */
837		if (container != (u32)-1) {
838			if (container >= dev->maximum_num_containers)
839				break;
840			if ((dev->fsa_dev[container].config_waiting_on ==
841			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
842			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
843				dev->fsa_dev[container].config_waiting_on = 0;
844		} else for (container = 0;
845		    container < dev->maximum_num_containers; ++container) {
846			if ((dev->fsa_dev[container].config_waiting_on ==
847			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
848			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
849				dev->fsa_dev[container].config_waiting_on = 0;
850		}
851		break;
852
853	case AifCmdEventNotify:
854		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
855		/*
856		 *	Add an Array.
857		 */
858		case AifEnAddContainer:
859			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
860			if (container >= dev->maximum_num_containers)
861				break;
862			dev->fsa_dev[container].config_needed = ADD;
863			dev->fsa_dev[container].config_waiting_on =
864				AifEnConfigChange;
865			dev->fsa_dev[container].config_waiting_stamp = jiffies;
866			break;
867
868		/*
869		 *	Delete an Array.
870		 */
871		case AifEnDeleteContainer:
872			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
873			if (container >= dev->maximum_num_containers)
874				break;
875			dev->fsa_dev[container].config_needed = DELETE;
876			dev->fsa_dev[container].config_waiting_on =
877				AifEnConfigChange;
878			dev->fsa_dev[container].config_waiting_stamp = jiffies;
879			break;
880
881		/*
882		 *	Container change detected. If we currently are not
883		 * waiting on something else, setup to wait on a Config Change.
884		 */
885		case AifEnContainerChange:
886			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
887			if (container >= dev->maximum_num_containers)
888				break;
889			if (dev->fsa_dev[container].config_waiting_on &&
890			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
891				break;
892			dev->fsa_dev[container].config_needed = CHANGE;
893			dev->fsa_dev[container].config_waiting_on =
894				AifEnConfigChange;
895			dev->fsa_dev[container].config_waiting_stamp = jiffies;
896			break;
897
898		case AifEnConfigChange:
899			break;
900
901		}
902
903		/*
904		 *	If we are waiting on something and this happens to be
905		 * that thing then set the re-configure flag.
906		 */
907		if (container != (u32)-1) {
908			if (container >= dev->maximum_num_containers)
909				break;
910			if ((dev->fsa_dev[container].config_waiting_on ==
911			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
912			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
913				dev->fsa_dev[container].config_waiting_on = 0;
914		} else for (container = 0;
915		    container < dev->maximum_num_containers; ++container) {
916			if ((dev->fsa_dev[container].config_waiting_on ==
917			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
918			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
919				dev->fsa_dev[container].config_waiting_on = 0;
920		}
921		break;
922
923	case AifCmdJobProgress:
924		/*
925		 *	These are job progress AIF's. When a Clear is being
926		 * done on a container it is initially created then hidden from
927		 * the OS. When the clear completes we don't get a config
928		 * change so we monitor the job status complete on a clear then
929		 * wait for a container change.
930		 */
931
932		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
933		 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
934		  || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
935			for (container = 0;
936			    container < dev->maximum_num_containers;
937			    ++container) {
938				/*
939				 * Stomp on all config sequencing for all
940				 * containers?
941				 */
942				dev->fsa_dev[container].config_waiting_on =
943					AifEnContainerChange;
944				dev->fsa_dev[container].config_needed = ADD;
945				dev->fsa_dev[container].config_waiting_stamp =
946					jiffies;
947			}
948		}
949		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
950		 && (((u32 *)aifcmd->data)[6] == 0)
951		 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
952			for (container = 0;
953			    container < dev->maximum_num_containers;
954			    ++container) {
955				/*
956				 * Stomp on all config sequencing for all
957				 * containers?
958				 */
959				dev->fsa_dev[container].config_waiting_on =
960					AifEnContainerChange;
961				dev->fsa_dev[container].config_needed = DELETE;
962				dev->fsa_dev[container].config_waiting_stamp =
963					jiffies;
964			}
965		}
966		break;
967	}
968
969	device_config_needed = NOTHING;
970	for (container = 0; container < dev->maximum_num_containers;
971	    ++container) {
972		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
973			(dev->fsa_dev[container].config_needed != NOTHING) &&
974			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
975			device_config_needed =
976				dev->fsa_dev[container].config_needed;
977			dev->fsa_dev[container].config_needed = NOTHING;
978			break;
979		}
980	}
981	if (device_config_needed == NOTHING)
982		return;
983
984	/*
985	 *	If we decided that a re-configuration needs to be done,
986	 * schedule it here on the way out the door, please close the door
987	 * behind you.
988	 */
989
990	/*
991	 *	Find the scsi_device associated with the SCSI address,
992	 * and mark it as changed, invalidating the cache. This deals
993	 * with changes to existing device IDs.
994	 */
995
996	if (!dev || !dev->scsi_host_ptr)
997		return;
998	/*
999	 * force reload of disk info via aac_probe_container
1000	 */
1001	if ((device_config_needed == CHANGE)
1002	 && (dev->fsa_dev[container].valid == 1))
1003		dev->fsa_dev[container].valid = 2;
1004	if ((device_config_needed == CHANGE) ||
1005			(device_config_needed == ADD))
1006		aac_probe_container(dev, container);
1007	device = scsi_device_lookup(dev->scsi_host_ptr,
1008		CONTAINER_TO_CHANNEL(container),
1009		CONTAINER_TO_ID(container),
1010		CONTAINER_TO_LUN(container));
1011	if (device) {
1012		switch (device_config_needed) {
1013		case DELETE:
1014		case CHANGE:
1015			scsi_rescan_device(&device->sdev_gendev);
1016
1017		default:
1018			break;
1019		}
1020		scsi_device_put(device);
1021	}
1022	if (device_config_needed == ADD) {
1023		scsi_add_device(dev->scsi_host_ptr,
1024		  CONTAINER_TO_CHANNEL(container),
1025		  CONTAINER_TO_ID(container),
1026		  CONTAINER_TO_LUN(container));
1027	}
1028
1029}
1030
1031static int _aac_reset_adapter(struct aac_dev *aac)
1032{
1033	int index, quirks;
1034	int retval;
1035	struct Scsi_Host *host;
1036	struct scsi_device *dev;
1037	struct scsi_cmnd *command;
1038	struct scsi_cmnd *command_list;
1039
1040	/*
1041	 * Assumptions:
1042	 *	- host is locked.
1043	 *	- in_reset is asserted, so no new i/o is getting to the
1044	 *	  card.
1045	 *	- The card is dead.
1046	 */
1047	host = aac->scsi_host_ptr;
1048	scsi_block_requests(host);
1049	aac_adapter_disable_int(aac);
1050	spin_unlock_irq(host->host_lock);
1051	kthread_stop(aac->thread);
1052
1053	/*
1054	 *	A positive health value means the adapter is in a known DEAD
1055	 * PANIC state and could be reset to `try again'.
1056	 */
1057	retval = aac_adapter_restart(aac, aac_adapter_check_health(aac));
1058
1059	if (retval)
1060		goto out;
1061
1062	/*
1063	 *	Loop through the fibs, close the synchronous FIBS
1064	 */
1065	for (index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
1066		struct fib *fib = &aac->fibs[index];
1067		if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
1068		  (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
1069			unsigned long flagv;
1070			spin_lock_irqsave(&fib->event_lock, flagv);
1071			up(&fib->event_wait);
1072			spin_unlock_irqrestore(&fib->event_lock, flagv);
1073			schedule();
1074		}
1075	}
1076	index = aac->cardtype;
1077
1078	/*
1079	 * Re-initialize the adapter, first free resources, then carefully
1080	 * apply the initialization sequence to come back again. Only risk
1081	 * is a change in Firmware dropping cache, it is assumed the caller
1082	 * will ensure that i/o is quiesced and the card is flushed in that
1083	 * case.
1084	 */
1085	aac_fib_map_free(aac);
1086	aac->hw_fib_va = NULL;
1087	aac->hw_fib_pa = 0;
1088	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
1089	aac->comm_addr = NULL;
1090	aac->comm_phys = 0;
1091	kfree(aac->queues);
1092	aac->queues = NULL;
1093	free_irq(aac->pdev->irq, aac);
1094	kfree(aac->fsa_dev);
1095	aac->fsa_dev = NULL;
1096	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) {
1097		if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
1098		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
1099			goto out;
1100	} else {
1101		if (((retval = pci_set_dma_mask(aac->pdev, 0x7FFFFFFFULL))) ||
1102		  ((retval = pci_set_consistent_dma_mask(aac->pdev, 0x7FFFFFFFULL))))
1103			goto out;
1104	}
1105	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
1106		goto out;
1107	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT)
1108		if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
1109			goto out;
1110	aac->thread = kthread_run(aac_command_thread, aac, aac->name);
1111	if (IS_ERR(aac->thread)) {
1112		retval = PTR_ERR(aac->thread);
1113		goto out;
1114	}
1115	(void)aac_get_adapter_info(aac);
1116	quirks = aac_get_driver_ident(index)->quirks;
1117	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
1118 		host->sg_tablesize = 34;
1119 		host->max_sectors = (host->sg_tablesize * 8) + 112;
1120 	}
1121 	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
1122 		host->sg_tablesize = 17;
1123 		host->max_sectors = (host->sg_tablesize * 8) + 112;
1124 	}
1125	aac_get_config_status(aac, 1);
1126	aac_get_containers(aac);
1127	/*
1128	 * This is where the assumption that the Adapter is quiesced
1129	 * is important.
1130	 */
1131	command_list = NULL;
1132	__shost_for_each_device(dev, host) {
1133		unsigned long flags;
1134		spin_lock_irqsave(&dev->list_lock, flags);
1135		list_for_each_entry(command, &dev->cmd_list, list)
1136			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
1137				command->SCp.buffer = (struct scatterlist *)command_list;
1138				command_list = command;
1139			}
1140		spin_unlock_irqrestore(&dev->list_lock, flags);
1141	}
1142	while ((command = command_list)) {
1143		command_list = (struct scsi_cmnd *)command->SCp.buffer;
1144		command->SCp.buffer = NULL;
1145		command->result = DID_OK << 16
1146		  | COMMAND_COMPLETE << 8
1147		  | SAM_STAT_TASK_SET_FULL;
1148		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
1149		command->scsi_done(command);
1150	}
1151	retval = 0;
1152
1153out:
1154	aac->in_reset = 0;
1155	scsi_unblock_requests(host);
1156	spin_lock_irq(host->host_lock);
1157	return retval;
1158}
1159
1160int aac_check_health(struct aac_dev * aac)
1161{
1162	int BlinkLED;
1163	unsigned long time_now, flagv = 0;
1164	struct list_head * entry;
1165	struct Scsi_Host * host;
1166
1167	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
1168	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1169		return 0;
1170
1171	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
1172		spin_unlock_irqrestore(&aac->fib_lock, flagv);
1173		return 0; /* OK */
1174	}
1175
1176	aac->in_reset = 1;
1177
1178	/* Fake up an AIF:
1179	 *	aac_aifcmd.command = AifCmdEventNotify = 1
1180	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
1181	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
1182	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
1183	 *	aac_aifcmd.data[2] = AifHighPriority = 3
1184	 *	aac_aifcmd.data[3] = BlinkLED
1185	 */
1186
1187	time_now = jiffies/HZ;
1188	entry = aac->fib_list.next;
1189
1190	/*
1191	 * For each Context that is on the
1192	 * fibctxList, make a copy of the
1193	 * fib, and then set the event to wake up the
1194	 * thread that is waiting for it.
1195	 */
1196	while (entry != &aac->fib_list) {
1197		/*
1198		 * Extract the fibctx
1199		 */
1200		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
1201		struct hw_fib * hw_fib;
1202		struct fib * fib;
1203		/*
1204		 * Check if the queue is getting
1205		 * backlogged
1206		 */
1207		if (fibctx->count > 20) {
1208			/*
1209			 * It's *not* jiffies folks,
1210			 * but jiffies / HZ, so do not
1211			 * panic ...
1212			 */
1213			u32 time_last = fibctx->jiffies;
1214			/*
1215			 * Has it been > 2 minutes
1216			 * since the last read off
1217			 * the queue?
1218			 */
1219			if ((time_now - time_last) > aif_timeout) {
1220				entry = entry->next;
1221				aac_close_fib_context(aac, fibctx);
1222				continue;
1223			}
1224		}
1225		/*
1226		 * Warning: no sleep allowed while
1227		 * holding spinlock
1228		 */
1229		hw_fib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
1230		fib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
1231		if (fib && hw_fib) {
1232			struct aac_aifcmd * aif;
1233
1234			memset(hw_fib, 0, sizeof(struct hw_fib));
1235			memset(fib, 0, sizeof(struct fib));
1236			fib->hw_fib_va = hw_fib;
1237			fib->dev = aac;
1238			aac_fib_init(fib);
1239			fib->type = FSAFS_NTC_FIB_CONTEXT;
1240			fib->size = sizeof (struct fib);
1241			fib->data = hw_fib->data;
1242			aif = (struct aac_aifcmd *)hw_fib->data;
1243			aif->command = cpu_to_le32(AifCmdEventNotify);
1244		 	aif->seqnum = cpu_to_le32(0xFFFFFFFF);
1245		 	aif->data[0] = cpu_to_le32(AifEnExpEvent);
1246			aif->data[1] = cpu_to_le32(AifExeFirmwarePanic);
1247		 	aif->data[2] = cpu_to_le32(AifHighPriority);
1248			aif->data[3] = cpu_to_le32(BlinkLED);
1249
1250			/*
1251			 * Put the FIB onto the
1252			 * fibctx's fibs
1253			 */
1254			list_add_tail(&fib->fiblink, &fibctx->fib_list);
1255			fibctx->count++;
1256			/*
1257			 * Set the event to wake up the
1258			 * thread that is waiting.
1259			 */
1260			up(&fibctx->wait_sem);
1261		} else {
1262			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1263			kfree(fib);
1264			kfree(hw_fib);
1265		}
1266		entry = entry->next;
1267	}
1268
1269	spin_unlock_irqrestore(&aac->fib_lock, flagv);
1270
1271	if (BlinkLED < 0) {
1272		printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
1273		goto out;
1274	}
1275
1276	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
1277
1278	host = aac->scsi_host_ptr;
1279	spin_lock_irqsave(host->host_lock, flagv);
1280	BlinkLED = _aac_reset_adapter(aac);
1281	spin_unlock_irqrestore(host->host_lock, flagv);
1282	return BlinkLED;
1283
1284out:
1285	aac->in_reset = 0;
1286	return BlinkLED;
1287}
1288
1289
1290/**
1291 *	aac_command_thread	-	command processing thread
1292 *	@dev: Adapter to monitor
1293 *
1294 *	Waits on the commandready event in its queue. When the event gets set
1295 *	it will pull FIBs off its queue. It will continue to pull FIBs off
1296 *	until the queue is empty. When the queue is empty it will wait for
1297 *	more FIBs.
1298 */
1299
1300int aac_command_thread(void *data)
1301{
1302	struct aac_dev *dev = data;
1303	struct hw_fib *hw_fib, *hw_newfib;
1304	struct fib *fib, *newfib;
1305	struct aac_fib_context *fibctx;
1306	unsigned long flags;
1307	DECLARE_WAITQUEUE(wait, current);
1308
1309	/*
1310	 *	We can only have one thread per adapter for AIF's.
1311	 */
1312	if (dev->aif_thread)
1313		return -EINVAL;
1314
1315	/*
1316	 *	Let the DPC know it has a place to send the AIF's to.
1317	 */
1318	dev->aif_thread = 1;
1319	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
1320	set_current_state(TASK_INTERRUPTIBLE);
1321	dprintk ((KERN_INFO "aac_command_thread start\n"));
1322	while(1)
1323	{
1324		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1325		while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
1326			struct list_head *entry;
1327			struct aac_aifcmd * aifcmd;
1328
1329			set_current_state(TASK_RUNNING);
1330
1331			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
1332			list_del(entry);
1333
1334			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
1335			fib = list_entry(entry, struct fib, fiblink);
1336			/*
1337			 *	We will process the FIB here or pass it to a
1338			 *	worker thread that is TBD. We really can't
1339			 *	do anything at this point since we don't have
1340			 *	anything defined for this thread to do.
1341			 */
1342			hw_fib = fib->hw_fib_va;
1343			memset(fib, 0, sizeof(struct fib));
1344			fib->type = FSAFS_NTC_FIB_CONTEXT;
1345			fib->size = sizeof( struct fib );
1346			fib->hw_fib_va = hw_fib;
1347			fib->data = hw_fib->data;
1348			fib->dev = dev;
1349			/*
1350			 *	We only handle AifRequest fibs from the adapter.
1351			 */
1352			aifcmd = (struct aac_aifcmd *) hw_fib->data;
1353			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
1354				/* Handle Driver Notify Events */
1355				aac_handle_aif(dev, fib);
1356				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1357				aac_fib_adapter_complete(fib, (u16)sizeof(u32));
1358			} else {
1359				struct list_head *entry;
1360				/* The u32 here is important and intended. We are using
1361				   32bit wrapping time to fit the adapter field */
1362
1363				u32 time_now, time_last;
1364				unsigned long flagv;
1365				unsigned num;
1366				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
1367				struct fib ** fib_pool, ** fib_p;
1368
1369				/* Sniff events */
1370				if ((aifcmd->command ==
1371				     cpu_to_le32(AifCmdEventNotify)) ||
1372				    (aifcmd->command ==
1373				     cpu_to_le32(AifCmdJobProgress))) {
1374					aac_handle_aif(dev, fib);
1375				}
1376
1377				time_now = jiffies/HZ;
1378
1379				/*
1380				 * Warning: no sleep allowed while
1381				 * holding spinlock. We take the estimate
1382				 * and pre-allocate a set of fibs outside the
1383				 * lock.
1384				 */
1385				num = le32_to_cpu(dev->init->AdapterFibsSize)
1386				    / sizeof(struct hw_fib); /* some extra */
1387				spin_lock_irqsave(&dev->fib_lock, flagv);
1388				entry = dev->fib_list.next;
1389				while (entry != &dev->fib_list) {
1390					entry = entry->next;
1391					++num;
1392				}
1393				spin_unlock_irqrestore(&dev->fib_lock, flagv);
1394				hw_fib_pool = NULL;
1395				fib_pool = NULL;
1396				if (num
1397				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
1398				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
1399					hw_fib_p = hw_fib_pool;
1400					fib_p = fib_pool;
1401					while (hw_fib_p < &hw_fib_pool[num]) {
1402						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
1403							--hw_fib_p;
1404							break;
1405						}
1406						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
1407							kfree(*(--hw_fib_p));
1408							break;
1409						}
1410					}
1411					if ((num = hw_fib_p - hw_fib_pool) == 0) {
1412						kfree(fib_pool);
1413						fib_pool = NULL;
1414						kfree(hw_fib_pool);
1415						hw_fib_pool = NULL;
1416					}
1417				} else {
1418					kfree(hw_fib_pool);
1419					hw_fib_pool = NULL;
1420				}
1421				spin_lock_irqsave(&dev->fib_lock, flagv);
1422				entry = dev->fib_list.next;
1423				/*
1424				 * For each Context that is on the
1425				 * fibctxList, make a copy of the
1426				 * fib, and then set the event to wake up the
1427				 * thread that is waiting for it.
1428				 */
1429				hw_fib_p = hw_fib_pool;
1430				fib_p = fib_pool;
1431				while (entry != &dev->fib_list) {
1432					/*
1433					 * Extract the fibctx
1434					 */
1435					fibctx = list_entry(entry, struct aac_fib_context, next);
1436					/*
1437					 * Check if the queue is getting
1438					 * backlogged
1439					 */
1440					if (fibctx->count > 20)
1441					{
1442						/*
1443						 * It's *not* jiffies folks,
1444						 * but jiffies / HZ so do not
1445						 * panic ...
1446						 */
1447						time_last = fibctx->jiffies;
1448						/*
1449						 * Has it been > 2 minutes
1450						 * since the last read off
1451						 * the queue?
1452						 */
1453						if ((time_now - time_last) > aif_timeout) {
1454							entry = entry->next;
1455							aac_close_fib_context(dev, fibctx);
1456							continue;
1457						}
1458					}
1459					/*
1460					 * Warning: no sleep allowed while
1461					 * holding spinlock
1462					 */
1463					if (hw_fib_p < &hw_fib_pool[num]) {
1464						hw_newfib = *hw_fib_p;
1465						*(hw_fib_p++) = NULL;
1466						newfib = *fib_p;
1467						*(fib_p++) = NULL;
1468						/*
1469						 * Make the copy of the FIB
1470						 */
1471						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
1472						memcpy(newfib, fib, sizeof(struct fib));
1473						newfib->hw_fib_va = hw_newfib;
1474						/*
1475						 * Put the FIB onto the
1476						 * fibctx's fibs
1477						 */
1478						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
1479						fibctx->count++;
1480						/*
1481						 * Set the event to wake up the
1482						 * thread that is waiting.
1483						 */
1484						up(&fibctx->wait_sem);
1485					} else {
1486						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1487					}
1488					entry = entry->next;
1489				}
1490				/*
1491				 *	Set the status of this FIB
1492				 */
1493				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1494				aac_fib_adapter_complete(fib, sizeof(u32));
1495				spin_unlock_irqrestore(&dev->fib_lock, flagv);
1496				/* Free up the remaining resources */
1497				hw_fib_p = hw_fib_pool;
1498				fib_p = fib_pool;
1499				while (hw_fib_p < &hw_fib_pool[num]) {
1500					kfree(*hw_fib_p);
1501					kfree(*fib_p);
1502					++fib_p;
1503					++hw_fib_p;
1504				}
1505				kfree(hw_fib_pool);
1506				kfree(fib_pool);
1507			}
1508			kfree(fib);
1509			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1510		}
1511		/*
1512		 *	There are no more AIF's
1513		 */
1514		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
1515		schedule();
1516
1517		if (kthread_should_stop())
1518			break;
1519		set_current_state(TASK_INTERRUPTIBLE);
1520	}
1521	if (dev->queues)
1522		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
1523	dev->aif_thread = 0;
1524	return 0;
1525}
1526